| function_name | docstring | masked_code | implementation | start_line | end_line | file_content |
|---|---|---|---|---|---|---|
| string, 1–63 chars | string, 50–5.89k chars | string, 50–882k chars | string, 169–12.9k chars | int32, 1–14.6k | int32, 16–14.6k | string, 274–882k chars |
function_name: setup_exceptionhook

docstring: Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke pdb.post_mortem;
if not interactive, then invokes the default handler.

masked_code:

# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system, and fall back to hardcoded "knowledge" if that fails
# (getconf ARG_MAX might not be available)
# The last value below is the most conservative one (Windows)
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all the careful computations we do, given the necessity to account
# for the environment and whatnot, we still could not figure out an "exact" way
# to estimate it, but it was shown that a 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us, we will just use up to 50%
# of the length as a "safety margin". We might still blow up due to
# env vars, unicode, etc... so any hard limit IMHO is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
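# Illustrative sketch (not part of the original module): for a hypothetical
#   def f(a, b=1, *, c=2): ...
# getargspec(f) raises ValueError (keyword-only args present), whereas
# getargspec(f, include_kwonlyargs=True) returns
#   ArgSpecFake(args=['a', 'b', 'c'], varargs=None, keywords=None, defaults=(1, 2))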
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
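# Illustrative sketch (hypothetical values, not part of the original module):
# on POSIX with HOME present in os.environ,
#   get_home_envvars('/tmp/newhome') -> {'HOME': '/tmp/newhome'}
# On Windows, USERPROFILE, HOMEDRIVE and HOMEPATH would also be remapped,
# again limited to the variables actually present in os.environ.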
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to also make files writable before removal. Usually it is
enough for just the directories to have write permissions.
If 'auto', files are chmod'ed on Windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : psutil.Process
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions, each starting with a letter or underscore (not a
digit) and up to 5 characters long, so we can get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
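# Illustrative sketch (not part of the original module) of the stripping above:
#   file_basename('archive.tar.gz')                  -> 'archive'
#   file_basename('archive.tar.gz', return_ext=True) -> ('archive', 'tar.gz')
#   file_basename('image.2020.nii')                  -> 'image.2020'  ('.2020' starts with a digit)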
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - also adjust mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
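# Illustrative sketch (not part of the original module):
#   ensure_dict_from_str("a=1\nb=2") -> {'a': '1', 'b': '2'}
#   ensure_dict_from_str("")         -> None
# An item without '=' or a repeated key raises ValueError.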
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
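# Illustrative sketch (not part of the original module) of recognized spellings:
#   ensure_bool('yes') -> True      ensure_bool('off')   -> False
#   ensure_bool('1')   -> True      (digit strings go through int())
#   ensure_bool('maybe') raises ValueError; non-str values go through bool()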
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness is checked in reverse order, so that among
duplicates the last occurrence is the one retained
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
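# Illustrative sketch (not part of the original module):
#   unique([1, 2, 1, 3])                                           -> [1, 2, 3]
#   unique([(1, 'a'), (2, 'a')], key=lambda x: x[1])               -> [(1, 'a')]
#   unique([(1, 'a'), (2, 'a')], key=lambda x: x[1], reverse=True) -> [(2, 'a')]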
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
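# Illustrative sketch (not part of the original module):
#   all_same([1, 1, 1]) -> True;  all_same([1, 2]) -> False;  all_same([]) -> False
# On a generator it stops at the first detected difference.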
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
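# Illustrative sketch (not part of the original module): keys and values are
# mapped alike, preserving the container and item types:
#   map_items(str, {1: 2}) -> {'1': '2'}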
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
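# Illustrative sketch (not part of the original module):
#   falses, trues = partition([0, 1, 2, 3], predicate=lambda x: x % 2)
#   list(falses) -> [0, 2];  list(trues) -> [1, 3]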
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
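# Illustrative sketch (not part of the original module):
#   list(generate_chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]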
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
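# Illustrative sketch (not part of the original module; run() is hypothetical):
# chunk a file list so that each invocation stays under CMD_MAX_ARG:
#   for chunk in generate_file_chunks(files, cmd=['git', 'annex', 'add']):
#       run(['git', 'annex', 'add', '--'] + chunk)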
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
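# Illustrative sketch (not part of the original module): the 2nd generator
# replays only what the 1st one has produced so far:
#   g1, g2 = saved_generator(iter(range(3)))
#   next(g1); next(g1)   # produces 0 and 1, saving them along the way
#   list(g2)             # -> [0, 1]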
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
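# Illustrative sketch (not part of the original module; names are hypothetical):
# a decorator written against optional_args can be applied bare or parametrized:
#   @optional_args
#   def tagged(f, tag='x'):
#       @wraps(f)
#       def newf(*args, **kwargs):
#           return (tag, f(*args, **kwargs))
#       return newf
#
#   @tagged                # bare: tag defaults to 'x'
#   def a(): return 1      # a() -> ('x', 1)
#
#   @tagged(tag='y')       # parametrized via a keyword argument
#   def b(): return 2      # b() -> ('y', 2)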
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files, instead of swallowing into a StringIO
(which lacks .fileno), to guarantee the absence of side-effects.
print mocking is necessary for some uses where sys.stdout was already bound
to the original sys.stdout, thus mocking it later had no effect. Overriding
the print function had the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(map(str, args)) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one mode or another; a race condition is possible here
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
# MASKED: setup_exceptionhook function (lines 1510-1531)
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell; such functions as `os.chdir` and `os.getcwd` neither use nor modify
it, thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns the relative path to the topdir if we are under the topdir, and
otherwise the absolute path to the topdir. If `pwd` is not specified,
the current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
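# Illustrative sketch (not part of the original module; POSIX paths):
#   get_path_prefix('/a/b', pwd='/a')   -> 'b'    (under pwd: relative)
#   get_path_prefix('/a', pwd='/a/b')   -> '..'   (above pwd: relative)
#   get_path_prefix('/a/b', pwd='/a/b') -> ''     (benign "here" is cut off)
#   get_path_prefix('/c', pwd='/a')     -> '/c'   (unrelated: absolute path)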
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
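# Illustrative sketch (not part of the original module) of the two predicates:
#   path_startswith('/a/b', '/a')   -> True
#   path_startswith('/a/b', '/a/b') -> True   (a path "starts with" itself)
#   path_is_subpath('/a/b', '/a')   -> True
#   path_is_subpath('/a/b', '/a/b') -> False  (equal paths are not subpaths)
# Mixing an absolute path with a relative prefix raises ValueError.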
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and resultant temporary filename is passed as the first argument into
the decorated function. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
# For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
The directory will be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
# we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
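# Illustrative sketch (not part of the original module): for edges A->B, B->C,
#   get_trace([('A', 'B'), ('B', 'C')], 'A', 'C') -> ['B']
#   get_trace([('A', 'B'), ('B', 'C')], 'A', 'B') -> []    (directly connected)
#   get_trace([('A', 'B'), ('B', 'C')], 'C', 'A') -> None  (no trace exists)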
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory, the root
dataset containing its parent directory will be reported. If none can
be found, and a symlink at `path` points to a dataset, `path` itself
will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
The main purpose is to decorate functions dealing with the removal of
files/directories, which might need a few attempts to succeed on Windows,
since it takes its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more: it has a recursive definition and
    performs checks for open files inside, etc., so ideally it should not
    itself be decorated -- that might be too expensive at runtime.
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
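# Examples for slash_join (editor's note; these follow directly from the code):
#   slash_join('http://example.com/base/', '/api')  -> 'http://example.com/base/api'
#   slash_join(None, 'api')                         -> 'api'
#   slash_join('base', None)                        -> 'base'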
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
    readahead: int, optional
      How many bytes to read for guessing the encoding type. If
      negative, the full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
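# Illustrative usage of open_r_encdetect (editor's sketch; the filename is
# hypothetical):
#   with open_r_encdetect('legacy_latin1.txt') as f:
#       text = f.read()  # decoded with the chardet-guessed encoding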
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
    Reads the file as UTF-8 and ensures every cell is a unicode string
Parameters
----------
fname: str
Filename
    dialect: str, optional
      Dialect to specify to csv.reader. If not specified, it is guessed
      from the file; if guessing fails, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file is opened as utf-8; cells are passed through ensure_unicode below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # ensure each cell is a unicode string:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
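# Illustrative usage of read_csv_lines (editor's sketch; file and fields are
# hypothetical). For a TSV whose header line is "name<TAB>age", this yields
# one dict per data row:
#   for rec in read_csv_lines('participants.tsv'):
#       print(rec['name'], rec['age'])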
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
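# Illustrative usage of import_modules (editor's sketch; names are
# hypothetical). Modules that fail to import are only logged, so the returned
# list may be shorter than the requested one:
#   loaded = import_modules(['json_ext', 'yaml_ext'], pkg='mypkg.formats')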
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
    assert modpath.endswith('.py')  # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
        ('locale.preferred', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
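# Examples for SequenceFormatter (editor's note):
#   SequenceFormatter().format("{0}", ["a", "b", "c"])      -> 'a b c'
#   SequenceFormatter(separator=",").format("{0}", (1, 2))  -> '1,2'
#   SequenceFormatter().format("{0}", "not-a-sequence")     -> 'not-a-sequence'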
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
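# Illustrative usage of create_tree (editor's sketch; names and content are
# hypothetical):
#   create_tree('/tmp/demo', [
#       ('README.txt', 'hello'),
#       ('sub', [('data.csv', 'a,b\n1,2\n')]),
#       (File('run.sh', executable=True), '#!/bin/sh\necho hi\n'),
#   ])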
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
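# Illustrative behavior of get_suggestions_msg (editor's note):
#   get_suggestions_msg('comit', ['commit', 'status'])
# returns roughly "Did you mean any of these?\n commit\n", and an empty string
# when difflib finds no close matches.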
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
    Convert n bytes into a human-readable string based on format.
    This simplified version uses a single, customary set of symbols
    (K, M, G, ...); see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
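# Examples for quote_cmdlinearg (editor's note):
#   POSIX:   quote_cmdlinearg('two words')  -> "'two words'"  (via shlex.quote)
#   Windows: quote_cmdlinearg('say "hi"')   -> '"say ""hi"""'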
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
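# Example for guard_for_format (editor's note): '{id}' becomes '{{id}}', which
# renders back to a literal '{id}' once passed through .format():
#   guard_for_format('file_{id}.txt').format()  -> 'file_{id}.txt'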
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # we only get here on Windows (the POSIX case returned early
            # above); the upstream recipe gated this on `platform == 0`
            # (Windows/CMD), which here would wrongly compare against the
            # platform module, so apply the CMD-style unescaping directly
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
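# Illustrative behavior of split_cmdline (editor's note): on POSIX this is
# plain shlex.split(); the regex branch emulates it for Windows:
#   split_cmdline('git commit -m "fix: a bug"')
#   -> ['git', 'commit', '-m', 'fix: a bug']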
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
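# Editor's note: the assure_* names above are deprecated shims; calling one
# emits a DeprecationWarning and delegates to its ensure_* counterpart, e.g.:
#   assure_list('a')  # warns, then returns ['a'] via ensure_list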
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
        sys.excepthook = _datalad_pdb_excepthook
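# Illustrative usage of setup_exceptionhook (editor's sketch; the --dbg flag
# is hypothetical): typically called once at program startup, e.g.:
#   if args.dbg:
#       setup_exceptionhook()
# An uncaught exception then drops into pdb.post_mortem() if the session is
# interactive (stdin/stdout/stderr are all TTYs).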
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness checked in the reverse order, so that the later ones
will take the order
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
    # repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell; functions such as `os.chdir` and `os.getcwd` neither use nor modify
    it, thus `os.getcwd()` returns a path with symlinks dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
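
    Examples
    --------
    Illustrative (the value depends on the environment):

    >>> from datalad.utils import getpwd
    >>> cwd = getpwd()  # may preserve symlinked components, unlike os.getcwd()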
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but maybe someone
                        # evil purged PWD from the environment?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change the
    directory to the given path
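
    Examples
    --------
    A minimal sketch (assumes '/tmp' exists; any existing directory works):

    >>> from datalad.utils import chpwd, getpwd
    >>> with chpwd('/tmp'):  # doctest: +SKIP
    ...     assert getpwd() == '/tmp'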
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
            # Need to use self.__class__ so this instance, if the entire
            # thing was mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want to default to norm=True to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path from `pwd` to `path` if one of them is
    underneath the other; otherwise the absolute path to `path`. If `pwd`
    is not specified, the current directory is assumed
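
    Examples
    --------
    Illustrative, with POSIX-style paths:

    >>> get_path_prefix('/a/b/c', '/a/b')  # doctest: +SKIP
    'c'
    >>> get_path_prefix('/a/b', '/d/e')  # doctest: +SKIP
    '/a/b'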
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
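
    Examples
    --------
    Illustrative, with POSIX-style paths:

    >>> path_is_subpath('/a/b/c', '/a/b')  # doctest: +SKIP
    True
    >>> path_is_subpath('/a/b', '/a/b')  # doctest: +SKIP
    False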
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
    `**tkwargs`:
        All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded by this context
        manager. If no 'prefix' argument is provided, it will be
        constructed using module and function names ('.' replaced with
        '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
            lgr.warning("It is unlikely that it was intended to remove all"
                        " files matching %r. Skipping", filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
        # Assume all other platforms are POSIX compliant, so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
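
    Examples
    --------
    Illustrative (exact output is determined by TIMESTAMP_FMT and timezone):

    >>> get_timestamp_suffix(0)  # doctest: +SKIP
    '-1970-01-01T00:00:00+0000'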
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory will be created if it doesn't exist, but `dspath` must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
        Returns a list with the trace to the target (the start and the target
        are not included in the trace, hence if start and end are directly
        connected an empty list is returned), or None when no trace to the
        target can be found, or start and end are identical.
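
    Examples
    --------
    Illustrative examples:

    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2), (2, 3)], 1, 2)
    []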
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` is pointing to a dataset, `path`
    itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
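
    Examples
    --------
    A hedged sketch ('/ds' stands for a hypothetical dataset with a .git):

    >>> get_dataset_root('/ds/subdir/file.txt')  # doctest: +SKIP
    '/ds'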
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
    increment_type: {None, 'exponential'}
        Note that if it is 'exponential', duration should typically be > 1.0,
        so that the delay grows with each trial
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
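
    Examples
    --------
    A minimal sketch; `fragile_rmdir` is a hypothetical function:

    >>> @try_multiple_dec(ntrials=3, duration=0.01, exceptions=OSError)
    ... def fragile_rmdir(path):
    ...     os.rmdir(path)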
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
    On Windows boxes there is evidence for a latency of more than a second
    until a file is considered no longer "in-use".
    Note: WindowsError is not defined on Linux, so an `except` statement
    listing WindowsError would itself raise a NameError there if any
    IOError or other exception were thrown;
    also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more and ideally should not itself be
    decorated, since it has a recursive definition and performs checks for
    open files inside, etc. -- that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
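
    Examples
    --------
    Illustrative examples:

    >>> slash_join('http://example.com/', '/about')
    'http://example.com/about'
    >>> slash_join(None, 'about')
    'about'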
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
        Dialect to pass to csv.reader. If not specified, it is guessed from
        the file; if guessing fails, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
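
    Examples
    --------
    A hedged sketch ('table.tsv' is a hypothetical file with a header row
    and a 'name' column):

    >>> for record in read_csv_lines('table.tsv'):  # doctest: +SKIP
    ...     print(record['name'])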
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # open as UTF-8 text (the csv module in Python 3 handles unicode natively):
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # normalize each cell to unicode (a no-op for already-decoded str):
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
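
    Examples
    --------
    Illustrative example:

    >>> SequenceFormatter().format("{0}", ["a", "b"])
    'a b'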
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
    If `load` is itself a tuple/list/dict, a subtree is created with that
    content, or an archive if `name` ends with .tar.gz, .tar, or .zip
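
    Examples
    --------
    A hedged sketch (writes under a temporary directory):

    >>> from datalad.utils import create_tree, make_tempfile
    >>> with make_tempfile(mkdir=True) as d:  # doctest: +SKIP
    ...     create_tree(d, {'f.txt': 'content', 'sub': {'g.txt': 'more'}})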
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    This simplified implementation supports only the "customary" symbols
    (K, M, G, ...); see http://goo.gl/kTQMs for the original, more flexible one
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used when `arg` is not expected to contain user-provided .format()
    placeholders, but might become a part of a composite string passed
    to .format(), e.g. via 'Run'
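
    Examples
    --------
    Illustrative example:

    >>> guard_for_format('file{1}.txt')
    'file{{1}}.txt'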
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
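
    Examples
    --------
    Illustrative (this input splits the same way on POSIX and Windows):

    >>> split_cmdline('git annex get "my file"')
    ['git', 'annex', 'get', 'my file']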
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
        elif qs:
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # this branch runs only on Windows ("platform == 0" denoted
            # Windows in the original snippet), where "" escapes a quote:
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
    However, for use in a datalad command context, we shouldn't assume to be
    able to write to tmpfile, and also should not import a whole lot from
    datalad's test machinery. Finally, we want to know whether we can create
    a symlink at a specific location, not just somewhere. Therefore use an
    arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher-level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
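
    Examples
    --------
    A hedged sketch (both paths are hypothetical and must not exist yet):

    >>> from pathlib import Path
    >>> check_symlink_capability(Path('probe.lnk'), Path('probe.dat'))  # doctest: +SKIP
    True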
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
|
getpwd | Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function. | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness checked in the reverse order, so that the later ones
will take the order
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
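# E.g. (illustrative): keep first occurrences, or last ones with reverse=True:
# >>> unique([1, 0, 1, 3, 2, 0])
# [1, 0, 3, 2]
# >>> unique([(1, 2), (1, 3)], key=lambda x: x[0], reverse=True)
# [(1, 3)]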
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
    # So we return False if the sequence was empty
return not first
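# E.g. (illustrative): note that an empty sequence is not "all same":
# >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
# (True, False, False)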
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
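# E.g. (illustrative): `func` is applied to keys and values alike:
# >>> map_items(str, {1: 2})
# {'1': '2'}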
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
        elements will be partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
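# E.g. (illustrative): "false" items come first, "true" items second:
# >>> falses, trues = partition(range(5), lambda x: x % 2 == 0)
# >>> list(falses), list(trues)
# ([1, 3], [0, 2, 4])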
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
    assert size > 0, "Size should be a positive (non-zero) number"
while container:
yield container[:size]
container = container[size:]
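# E.g. (illustrative):
# >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]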
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
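# E.g. (illustrative): the second generator replays what the first produced:
# >>> a, b = saved_generator(iter([1, 2]))
# >>> list(a), list(b)
# ([1, 2], [1, 2])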
#
# Decorators
#
# Originally better_wraps was created to provide a `wrapt`-based, instead of
# `functools.wraps`, implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
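# A minimal sketch (hypothetical `logged` decorator with a `verbose` flag)
# which, thanks to optional_args, can be used with or without arguments:
# @optional_args
# def logged(f, verbose=False):
#     @wraps(f)
#     def _wrapped(*args, **kwargs):
#         if verbose:
#             lgr.debug("Calling %s", f.__name__)
#         return f(*args, **kwargs)
#     return _wrapped
# ... after which both @logged and @logged(verbose=True) work.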
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
    - .repo is expensive since it does all kinds of checks.
    - .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
    @wraps(func)
    def _wrap_collect_method_callstats(*args, **kwargs):
        # record the start time upfront, so the finally clause below cannot
        # fail with an undefined t0 if the bookkeeping raises
        t0 = time()
        try:
            self = args[0]
            stack = traceback.extract_stack()
            caller = stack[-2]
            stack_sig = \
                "{relpath}:{s.name}".format(
                    s=caller, relpath=relpath(caller.filename, toppath))
            sig = (id(self), stack_sig)
            # we will count based on id(self) + wherefrom
            memo[sig][caller.lineno] += 1
            return func(*args, **kwargs)
        finally:
            times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
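# Typical usage (illustrative):
# >>> with swallow_outputs() as cmo:
# ...     print("hello")
# ...     assert cmo.out == "hello\n"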
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 requires clearly one mode or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
    # TODO: it does store messages, but without any formatting, i.e. even
    # without the date/time prefix etc. IMHO it should preserve formatting
    # in case file_ is set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
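# Typical usage in tests (illustrative):
# >>> with swallow_logs(new_level=logging.DEBUG) as cml:
# ...     lgr.debug("message of interest")
# ...     cml.assert_logged("message of interest", level='DEBUG')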
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
    Joins the list of arguments into an os-specific path to the desired
    directory and creates it if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
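# E.g. (illustrative): the input mapping is not modified:
# >>> updated({'a': 1}, {'b': 2})
# {'a': 1, 'b': 2}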
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
    # repos and tuning up their .paths to be resolved?
def getpwd():
    """Try to return a CWD without dereferencing possible symlinks
    This function will try to use PWD environment variable to provide a current
    working directory, possibly with some directories along the path being
    symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
    it, thus `os.getcwd()` returns path with links dereferenced.
    While returning current working directory based on PWD env variable we
    verify that the directory is the same as `os.getcwd()` after resolving all
    symlinks. If that verification fails, we fall back to always use
    `os.getcwd()`.
    Initial decision to either use PWD env variable or os.getcwd() is done upon
    the first call of this function.
    """
    global _pwd_mode
    if _pwd_mode is None:
        # we need to decide!
        try:
            pwd = os.environ['PWD']
            if on_windows and pwd and pwd.startswith('/'):
                # It should be a path from MSYS.
                # - it might start with a drive letter or not
                # - it seems to be "illegal" to have a single letter directories
                #   under / path, i.e. if created - they aren't found
                # - 'ln -s' does not fail to create a "symlink" but it just
                #   copies!
                # so we are not likely to need original PWD purpose on
                # those systems
                # Verdict:
                _pwd_mode = 'cwd'
            else:
                _pwd_mode = 'PWD'
        except KeyError:
            _pwd_mode = 'cwd'
    if _pwd_mode == 'cwd':
        return os.getcwd()
    elif _pwd_mode == 'PWD':
        try:
            cwd = os.getcwd()
        except OSError as exc:
            if "o such file" in str(exc):
                # directory was removed but we promised to be robust and
                # still report the path we might know since we are still in PWD
                # mode
                cwd = None
            else:
                raise
        try:
            pwd = os.environ['PWD']
            # do absolute() in addition to always get an absolute path
            # even with non-existing paths on windows
            pwd_real = str(Path(pwd).resolve().absolute())
            # This logic would fail to catch the case where chdir did happen
            # to the directory where current PWD is pointing to, e.g.
            # $> ls -ld $PWD
            # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
            # hopa:~/.tmp/tmp
            # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
            # ('/home/yoh/.tmp/tmp', '/tmp')
            # but I guess that should not be too harmful
            if cwd is not None and pwd_real != cwd:
                _switch_to_getcwd(
                    "realpath of PWD=%s is %s whenever os.getcwd()=%s",
                    pwd, pwd_real, cwd
                )
                return cwd
            return pwd
        except KeyError:
            _switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but may be someone
                        # evil purges PWD from environ?
    else:
        raise RuntimeError(
            "Must have not got here. "
            "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
        )
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change
    the directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under the topdir, and
    the absolute path to the topdir otherwise. If `pwd` is not specified,
    the current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
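# E.g. (illustrative, POSIX paths): a path is not a subpath of itself:
# >>> path_startswith('/a/b', '/a'), path_is_subpath('/a/b', '/a')
# (True, True)
# >>> path_is_subpath('/a', '/a')
# False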
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is yielded by this context
      manager. If no 'prefix' argument is provided, it will be
      constructed using module and function names ('.' replaced with
      '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly
      connected an empty list is returned), or None when no trace to the
      target can be found, or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
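# E.g. (illustrative): only intermediate nodes are returned; a direct
# connection yields an empty list:
# >>> get_trace([(1, 2), (2, 3)], 1, 3)
# [2]
# >>> get_trace([(1, 2), (2, 3)], 1, 2)
# []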
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` is pointing to a dataset, `path`
    itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
    Main purpose is to decorate functions dealing with removal of files and
    directories, which might need a few tries to work correctly on Windows,
    which takes its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
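# A minimal sketch (hypothetical function) retrying a flaky removal:
# @try_multiple_dec(ntrials=3, duration=0.05, exceptions=OSError)
# def remove_lockfile(path):
#     os.unlink(path)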
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
rmtree defined above does more and ideally should not itself be decorated
since a recursive definition and does checks for open files inside etc -
might be too runtime expensive
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
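# E.g. (illustrative):
# >>> slash_join('http://example.com/', '/api/v1')
# 'http://example.com/api/v1'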
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file is opened as UTF-8 text; cells are normalized below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # normalize cells to unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
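# E.g. (illustrative): sequences are joined with the separator:
# >>> SequenceFormatter().format("{0}", ["a", "b", "c"])
# 'a b c'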
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where arg is not expected to have user-provided
    .format() placeholders, but 'arg' might become a part
    of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
        elif qs:
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # NOTE: the original snippet also unescaped doubled quotes
            # ("" -> ") for POSIX input, guarded by `platform == 0` where
            # `platform` was an integer flag. Here `platform` is the module,
            # so that check was always False; POSIX input never reaches this
            # code anyway (handled by shlex_split above), hence the branch
            # was dropped.
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
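# E.g. (illustrative; on POSIX this is plain shlex.split):
# >>> split_cmdline('git commit -m "a message"')
# ['git', 'commit', '-m', 'a message']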
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
    However, for use in a datalad command context, we shouldn't assume being
    able to write to tmpfile, and should also not import a whole lot from
    datalad's test machinery. Finally, we want to know whether we can create
    a symlink at a specific location, not just somewhere. Therefore we use
    an arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
) | 1,571 | 1,650 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
            # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
            path = '\\\\?\\' + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each up to ~4 characters long and starting with
    a letter (not a digit), so we could get rid of e.g. .tar.gz
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
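# For illustration (a sketch):
#   >>> file_basename('archive.tar.gz')
#   'archive'
#   >>> file_basename('archive.tar.gz', return_ext=True)
#   ('archive', 'tar.gz')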
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
        To overcome the absence of os.lutime.
        Works only on Linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
            # trust no one - adjust also the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
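# For illustration (a sketch):
#   >>> ensure_list_from_str('a\nb')
#   ['a', 'b']
#   >>> ensure_list_from_str('') is None
#   True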
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict

    Returns
    -------
    dict or None
      None, if input `s` is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
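# For illustration (a sketch):
#   >>> ensure_dict_from_str('a=1\nb=2')
#   {'a': '1', 'b': '2'}
# A repeated key, or an entry without '=', raises ValueError.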
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
    confidence: float, optional
      A value between 0 and 1; if the guessed encoding has a confidence
      lower than specified, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
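# For illustration (a sketch):
#   >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
#   (True, False, True)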
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
    reverse: bool, optional
      If True, uniqueness is checked in reverse order, so that the later
      occurrences take precedence
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
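# For illustration (a sketch):
#   >>> unique([1, 2, 1, 3])
#   [1, 2, 3]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0])
#   [(1, 'a'), (2, 'b')]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0], reverse=True)
#   [(2, 'b'), (1, 'c')]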
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
    should be more efficient while working on generators, since it would
    return False as soon as any difference is detected, thus possibly avoiding
    unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
    # So we return False if it was empty
return not first
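# For illustration (a sketch) -- note that an empty iterable yields False:
#   >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
#   (True, False, False)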
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
    No type checking of values passed to func is done, so `func`
    should be resilient to values it might not be able to handle.
    Initial use case: apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
      A function that will be mapped over each element in `items`. The
      elements will be partitioned based on whether the return value is
      false or true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
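# For illustration (a sketch) -- items for which the predicate is false come
# first:
#   >>> odd, even = partition(range(6), lambda x: x % 2 == 0)
#   >>> list(odd), list(even)
#   ([1, 3, 5], [0, 2, 4])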
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
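# For illustration (a sketch):
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]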
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
    # but maybe we decide to go for smth like the following to be on the safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
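# For illustration (a sketch) -- the replay generator yields only what the
# first one has produced so far:
#   >>> gen1, gen2 = saved_generator(iter(range(3)))
#   >>> next(gen1), next(gen1)
#   (0, 1)
#   >>> list(gen2)
#   [0, 1]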
#
# Decorators
#
# Originally better_wraps was created to provide a `wrapt`-based, instead of a
# `functools.wraps`, implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
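# For illustration, a decorator defined with @optional_args (`logged` is a
# made-up example, not an API of this module) can then be used both bare as
# @logged and parameterized as @logged(msg="..."):
#   @optional_args
#   def logged(f, msg="calling"):
#       @wraps(f)
#       def _wrap(*args, **kwargs):
#           lgr.debug("%s %s", msg, f.__name__)
#           return f(*args, **kwargs)
#       return _wrap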
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to guarantee absence of side-effects of
    swallowing into StringIO, which lacks .fileno.
    Mocking of print is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, so mocking it later had no effect;
    overriding the print function had the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
            # must be some other file -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 requires clearly one mode or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
    # date/time prefix etc. IMHO it should preserve formatting in case file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell; such functions as `os.chdir` and `os.getcwd` neither use nor modify
    it, thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but maybe someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows temporarily changing the directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns relative path to the topdir if we are under topdir, and if not --
    absolute path to topdir. If `pwd` is not specified, the current directory
    is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
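# For illustration (a sketch; POSIX paths shown, and both arguments must be
# either absolute or relative):
#   >>> path_startswith('/a/b', '/a'), path_is_subpath('/a/b', '/a')
#   (True, True)
#   >>> path_startswith('/a', '/a'), path_is_subpath('/a', '/a')
#   (True, False)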
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
      All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is yielded by this context
      manager. If no 'prefix' argument is provided, it will be
      constructed using module and function names ('.' replaced with
      '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
        # Assume that all others are POSIX compliant, so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
    primarily to be used for generation of log file names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly
      connected an empty list is returned), or None when no trace to the
      target can be found, or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
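# For illustration (a sketch) -- start and end nodes are not part of the
# trace, so directly connected nodes yield an empty list:
#   >>> edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
#   >>> get_trace(edges, 'a', 'd')
#   ['b', 'c']
#   >>> get_trace(edges, 'a', 'b')
#   []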
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` is pointing to a dataset, `path`
    itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if IOError or any other exception
    is thrown there while an except statement mentions WindowsError, a
    NameError results.
    Also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    rmtree defined above does more and ideally should not itself be decorated,
    since it has a recursive definition and does checks for open files inside,
    etc -- that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
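# For illustration (a sketch; example.com is a made-up URL):
#   >>> slash_join('http://example.com/', '/about/')
#   'http://example.com/about/'
#   >>> slash_join(None, 'about')
#   'about'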
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
      How many bytes to read for guessing the encoding type. If
      negative, the full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
    If load is a tuple itself -- that would create either a subtree or, if the
    name ends with .tar.gz, .tar or .zip, an archive with that content, and
    place it into the tree
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
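# For illustration (a sketch; actual suggestions and their order come from
# difflib.get_close_matches):
#   >>> get_suggestions_msg('instal', ['install', 'uninstall'], sep=' ')
#   'Did you mean any of these? install uninstall\n'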
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    This simplified version always uses "customary" symbols (K, M, G, ...),
    see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
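# On POSIX this delegates to shlex quoting, e.g.:
#   >>> join_cmdline(['ls', 'a file'])
#   "ls 'a file'"
# whereas on Windows every argument is double-quoted, with inner double
# quotes doubled.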
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # we are on Windows here (POSIX strings were handled by
            # shlex_split above), so collapse doubled quotes; the upstream
            # snippet guarded this with `platform == 0` (its code for Windows)
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
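# Simple cases split the same way on either platform (illustrative):
#   >>> split_cmdline('cmd "a b" c')
#   ['cmd', 'a b', 'c']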
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
|
dlabspath | Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os.path.abspath? | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
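# A quick illustration of the shim (note that defaults are collapsed to a
# tuple, mirroring the original getargspec):
#   >>> def f(a, b=1, *args, **kwargs):
#   ...     pass
#   >>> getargspec(f)
#   ArgSpecFake(args=['a', 'b'], varargs='args', keywords='kwargs', defaults=(1,))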
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
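# Illustration with a minimal (hypothetical) class -- only public instance
# attributes end up in the repr:
#   >>> @auto_repr
#   ... class Demo:
#   ...     def __init__(self):
#   ...         self.a = 1
#   ...         self._hidden = 2
#   >>> Demo()
#   Demo(a=1)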
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
    if isdir(path) and not islink(path):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
        path = '\\\\?\\' + path  # cannot use a raw string since it would end with a backslash
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions of length up to 4 characters each, starting with
    a letter (not a digit), so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
        To overcome the absence of os.lutime.
        Works only on Linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
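# Behavior sketch (illustrative): strings are kept whole, other iterables are
# iterated, and None yields an empty instance of `cls`:
#   >>> ensure_iter('abc', list)
#   ['abc']
#   >>> ensure_iter((1, 2), list)
#   [1, 2]
#   >>> ensure_iter(None, set)
#   set()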
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
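# For example (illustrative):
#   >>> ensure_dict_from_str('a=1\nb=2')
#   {'a': '1', 'b': '2'}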
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
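# For example:
#   >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
#   (True, False, True)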
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in reverse order, so that later entries
      take precedence
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
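# With a key function, the first element per key wins unless reverse is set,
# in which case the last one does (order is preserved either way):
#   >>> unique([(1, 2), (1, 3), (2, 4)], key=lambda x: x[0])
#   [(1, 2), (2, 4)]
#   >>> unique([(1, 2), (1, 3), (2, 4)], key=lambda x: x[0], reverse=True)
#   [(1, 3), (2, 4)]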
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
        A function that will be mapped over each element in `items`. The
        elements will be partitioned based on whether the return value is
        false or true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
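# For example, splitting numbers by parity (false items come first):
#   >>> odds, evens = partition(range(5), lambda x: x % 2 == 0)
#   >>> list(odds), list(evens)
#   ([1, 3], [0, 2, 4])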
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
    assert size > 0, "Size should be a positive number"
while container:
yield container[:size]
container = container[size:]
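# For example:
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]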
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
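# The replay generator only yields what the first one has produced so far:
#   >>> g1, g2 = saved_generator(iter(range(3)))
#   >>> list(g1), list(g2)
#   ([0, 1, 2], [0, 1, 2])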
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
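# Sketch of both invocation styles enabled by optional_args (the decorator
# and names here are hypothetical):
#   >>> @optional_args
#   ... def tag(f, label='x'):
#   ...     f.label = label
#   ...     return f
#   >>> @tag
#   ... def a(): pass
#   >>> @tag(label='y')
#   ... def b(): pass
#   >>> a.label, b.label
#   ('x', 'y')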
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to guarantee absence of side-effects of
    swallowing into StringIO, which lacks .fileno.
    print mocking is necessary for some uses where sys.stdout was already bound
    to the original sys.stdout, and thus mocking it later had no effect.
    Overriding the print function had the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
                # as print() would do, stringify the arguments before joining
                sys.stdout.write(sep.join(map(str, args)) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 requires clearly one mode or another; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
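# For example:
#   >>> updated({'a': 1}, {'b': 2})
#   {'a': 1, 'b': 2}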
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and such functions as `os.chdir` and `os.getcwd` do not use or modify
    it; thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
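            # match both "No such file" and "no such file"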
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but maybe someone
                        # evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows temporarily changing the directory
    to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
# MASKED: dlabspath function (lines 1694-1706)
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns relative path to the topdir, if we are under topdir, and if not,
    the absolute path to topdir. If `pwd` is not specified - the current
    directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
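# Both helpers compare whole path components, not raw string prefixes:
#   >>> path_startswith('/a/bc', '/a/b')
#   False
#   >>> path_is_subpath('/a/b', '/a')
#   True
#   >>> path_is_subpath('/a', '/a')
#   False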
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and resultant temporary filename is passed as the first argument into
the function t. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
    primarily to be used for generation of log file names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory will be created if it doesn't exist, but dspath must exist
    and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
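
    Examples
    --------
    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2)], 1, 2)  # directly connected nodes
    []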
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge is not matching the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` is pointing to a dataset, `path`
    itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
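
    Examples
    --------
    A sketch with a dataset (a directory containing .git) at a hypothetical
    /home/me/ds:

    >>> get_dataset_root('/home/me/ds/sub/file.txt')  # doctest: +SKIP
    '/home/me/ds'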
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
    The main purpose is to decorate functions dealing with removal of
    files/directories, which might need a few retries to work correctly on
    Windows, since it takes its time to release files/directories.
Parameters
----------
    ntrials: int, optional
      Number of trials before giving up and re-raising; the default depends
      on the platform (more trials on Windows).
    duration: float, optional
      Seconds to sleep before retrying.
    increment_type: {None, 'exponential'}
      If 'exponential', the n-th delay is `duration ** n`, so `duration`
      should typically be > 1.0 for the delays to grow.
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
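
    Examples
    --------
    A minimal sketch; the decorated function and the path are hypothetical:

    >>> @try_multiple_dec(ntrials=3, duration=0.01)  # doctest: +SKIP
    ... def robust_unlink(path):
    ...     os.unlink(path)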
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The `rmtree` defined above does more and ideally should not itself be
    decorated, since it has a recursive definition and performs checks for
    open files inside, etc. -- that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
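
    For example:

    >>> slash_join('http://example.com/', '/about')
    'http://example.com/about'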
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
      negative, the full file will be read
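
    A sketch of intended use ('legacy.txt' being a hypothetical file of
    unknown encoding):

    >>> with open_r_encdetect('legacy.txt') as f:  # doctest: +SKIP
    ...     content = f.read()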
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
      if False, ensure_unicode is not applied and the file content is returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
    dialect: str, optional
      Dialect to pass to csv.reader. If not specified, it is guessed from
      the file; if guessing fails, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
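
    Examples
    --------
    A sketch for a hypothetical tab-separated 'pets.tsv' with a header line
    "name<TAB>kind":

    >>> list(read_csv_lines('pets.tsv'))  # doctest: +SKIP
    [{'name': 'fido', 'kind': 'dog'}]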
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file is opened with an explicit utf-8 encoding; cells are
        # additionally normalized via ensure_unicode below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
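
    Examples
    --------
    A sketch, assuming a 'support' submodule exists under the given package:

    >>> import_modules(['support'], 'datalad')  # doctest: +SKIP
    [<module 'datalad.support' from ...>]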
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
    import locale  # sys is already imported at the module level
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
        ('locale.prefered', locale.getpreferredencoding()),  # sic -- key spelling kept as-is for consumers
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
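
    For example:

    >>> SequenceFormatter().format("{0}", [1, 2, 3])
    '1 2 3'
    >>> SequenceFormatter(separator=", ").format("{0}", [1, 2, 3])
    '1, 2, 3'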
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
    If `load` is itself a tuple (or list, or dict), that would create either a
    subtree or, if `name` ends with an archive suffix (.tar.gz, .tar or .zip),
    an archive with that content, and place it into the tree
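
    A sketch of intended use ('/tmp/demo' being a hypothetical path):

    >>> create_tree('/tmp/demo',
    ...             {'subdir': {'file1.txt': 'content'}})  # doctest: +SKIP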
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    Note: this simplified version supports only the customary symbols
    (K, M, G, ...); the original (see http://goo.gl/kTQMs) also offered
    "customary_ext", "iec" and "iec_ext" symbol sets.
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where `arg` is not expected to contain user-provided
    .format() placeholders, but might become a part of a composite string
    passed to .format(), e.g. via 'Run'
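
    For example:

    >>> guard_for_format('{id}')
    '{{id}}'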
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # NB: the StackOverflow recipe guarded the following with an
            # integer `platform` parameter (0 == POSIX-like). Here `platform`
            # is the imported module, so its `platform == 0` check was always
            # False and the '""' -> '"' replacement never ran; the dead
            # branch is dropped without changing behavior.
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
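
    Examples
    --------
    A sketch with hypothetical paths in the current directory:

    >>> from pathlib import Path
    >>> check_symlink_capability(Path('probe.lnk'), Path('probe.dat'))  # doctest: +SKIP
    True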
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()


def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
    return normpath(path) if norm else path
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness checked in the reverse order, so that the later ones
will take the order
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
    # TODO: it does store messages but without any formatting, i.e. even
    # without date/time prefix etc.  IMHO it should preserve formatting in
    # case file_ is set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
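# A minimal usage sketch for swallow_logs (illustrative only):
#
#     with swallow_logs(new_level=logging.DEBUG) as cml:
#         lgr.debug("some debug message")
#         cml.assert_logged("some debug message", level="DEBUG")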
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
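# A minimal usage sketch for disable_logger (illustrative only):
#
#     with disable_logger():
#         lgr.error("this record is rejected by all of datalad's handlers")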
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
    Joins the list of arguments into an os-specific path to the desired
    directory and creates it if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
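    Examples
    --------
    A minimal, deterministic illustration:
    >>> updated({'a': 1}, {'b': 2})
    {'a': 1, 'b': 2}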
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell; functions such as `os.chdir` and `os.getcwd` neither use nor
    modify it, thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but maybe someone
                        # evil purged PWD from environ?
else:
        raise RuntimeError(
            "Must not have gotten here. "
            "_pwd_mode must be either 'cwd' or 'PWD', but it is now %r" % (_pwd_mode,)
)
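# An illustrative contrast of getpwd() vs os.getcwd() under a symlinked CWD
# (hypothetical POSIX paths, shell session):
#
#     $ mkdir -p /tmp/real && ln -s /tmp/real /tmp/link && cd /tmp/link
#     $ python -c 'import os; from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
#     /tmp/link /tmp/real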
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows temporarily changing the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
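# A minimal usage sketch for chpwd (illustrative only; the directory is
# hypothetical):
#
#     with chpwd('/some/dir'):
#         ...  # getpwd() and os.environ['PWD'] now point at /some/dir
#     # the previous working directory is restored on exit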
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path from `pwd` to `path` if one of the two is
    underneath the other, and the absolute path to `path` otherwise.
    If `pwd` is not specified, the current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
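# Illustrative calls (POSIX-style paths):
#
#     get_path_prefix('/a/b/c', pwd='/a/b')  ->  'c'
#     get_path_prefix('/a/b', pwd='/d')      ->  '/a/b'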
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
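# Illustrative calls (POSIX-style paths):
#
#     path_is_subpath('/a/b', '/a')  ->  True
#     path_is_subpath('/a', '/a')    ->  False  (equal paths are not subpaths)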
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
        All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded as the value of the
        context manager. If no 'prefix' argument is provided, it will be
        constructed using module and function names ('.' replaced with
        '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
        # Assume that all others are POSIX compliant, so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly connected
      an empty list is returned), or None when no trace to the target can be found
      or start and end are identical.
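    Examples
    --------
    Minimal, deterministic illustrations:
    >>> get_trace([(1, 2), (2, 3), (3, 4)], 1, 4)
    [2, 3]
    >>> get_trace([(1, 2)], 1, 2)
    []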
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` points to a dataset, `path` itself
    will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
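# An illustrative call, assuming /ds/.git exists while /ds/sub is a plain
# subdirectory (paths are hypothetical):
#
#     get_dataset_root('/ds/sub/file.txt')  ->  '/ds'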
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
      If 'exponential', the delay grows as duration ** trial, so `duration`
      should then typically be > 1.0 for the delay to actually grow
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
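# A minimal usage sketch for try_multiple_dec (illustrative only):
#
#     @try_multiple_dec(ntrials=3, duration=1.5, increment_type='exponential',
#                       exceptions=OSError)
#     def robust_rmdir(d):
#         os.rmdir(d)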
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more, and ideally should not itself be
    decorated, since it has a recursive definition and performs checks for
    open files inside etc. -- that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
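    Examples
    --------
    Minimal, deterministic illustrations:
    >>> slash_join('http://example.com/', '/api/')
    'http://example.com/api/'
    >>> slash_join(None, 'suffix')
    'suffix'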
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file is opened as UTF-8 text (Python 3), so csv.reader gets
        # str cells; they are additionally passed through ensure_unicode below
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
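# A minimal usage sketch (illustrative; 'table.tsv' and its 'name' column
# are hypothetical):
#
#     for record in read_csv_lines('table.tsv'):
#         print(record['name'])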
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
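# Illustrative calls:
#
#     SequenceFormatter(separator=", ").format("{0}", ["a", "b"])  ->  'a, b'
#     SequenceFormatter().format("{0}", "not-a-sequence")          ->  'not-a-sequence'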
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
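# A minimal usage sketch (illustrative; the target path is hypothetical):
#
#     create_tree('/tmp/demo', {
#         'file1.txt': 'some content',
#         'subdir': {'file2.txt': 'more content'},
#     })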
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    This simplified version uses only the "customary" symbols (K, M, G, ...,
    i.e. powers of 1024); see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used when `arg` is not expected to contain user-provided .format()
    placeholders, but might become part of a composite string passed to
    .format(), e.g. via 'Run'
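    Examples
    --------
    A minimal, deterministic illustration:
    >>> guard_for_format('file{1}.dat')
    'file{{1}}.dat'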
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # The StackOverflow original guarded this with `if platform == 0`
            # (its Windows flag); here `platform` is the stdlib module, so the
            # check was always False. This whole branch is Windows-only, so
            # CMD-style doubled quotes must always be unescaped.
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
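# An illustrative call (this branch is only taken on Windows):
#
#     split_cmdline('prog "a b" c')  ->  ['prog', 'a b', 'c']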
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
|
knows_annex | Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch. | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
    confidence: float, optional
      A value between 0 and 1; if the guessed encoding has a confidence lower
      than specified, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
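    Examples
    --------
    A few illustrative calls:
    >>> ensure_bool('on'), ensure_bool('0'), ensure_bool('No')
    (True, False, False)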
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
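    Examples
    --------
    A few illustrative calls:
    >>> as_unicode(None)
    ''
    >>> as_unicode(1)
    '1'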
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in the reverse order, so that the last
      occurrence of a duplicated element is the one retained
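    Examples
    --------
    A few illustrative calls:
    >>> unique([1, 2, 1, 3])
    [1, 2, 3]
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0])
    [(1, 2)]
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0], reverse=True)
    [(1, 3)]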
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
    more efficient while working on generators, since it
    returns False as soon as any difference is detected, thus possibly
    avoiding unnecessary evaluations
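    Examples
    --------
    A few illustrative calls (note that an empty iterable yields False):
    >>> all_same([1, 1, 1])
    True
    >>> all_same([1, 2])
    False
    >>> all_same([])
    False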
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
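    Examples
    --------
    An illustrative call:
    >>> map_items(str, {1: 2})
    {'1': '2'}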
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
      elements will be partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
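    Examples
    --------
    An illustrative call (note that the 'false' generator comes first):
    >>> odd, even = partition(range(4), lambda x: x % 2 == 0)
    >>> list(odd), list(even)
    ([1, 3], [0, 2])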
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
    assert size > 0, "Size should be a positive integer"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
    # as https://github.com/datalad/datalad/issues/1883#issuecomment-436272758
    # shows, there seems to be no hardcoded limit on the number of arguments,
    # but maybe we decide to go for smth like the following to be on the safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
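    Examples
    --------
    An illustrative use:
    >>> gen1, gen2 = saved_generator(iter(range(3)))
    >>> list(gen1)
    [0, 1, 2]
    >>> list(gen2)
    [0, 1, 2]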
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we removed that necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
    def _wrap_collect_method_callstats(*args, **kwargs):
        t0 = time()  # assigned before `try` so `finally` can rely on it
        try:
            self = args[0]
            stack = traceback.extract_stack()
            caller = stack[-2]
            stack_sig = \
                "{relpath}:{s.name}".format(
                    s=caller, relpath=relpath(caller.filename, toppath))
            sig = (id(self), stack_sig)
            # we will count based on id(self) + wherefrom
            memo[sig][caller.lineno] += 1
            return func(*args, **kwargs)
        finally:
            times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
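    Examples
    --------
    A minimal illustrative use (read cm.out while still inside the block,
    since the temporary files are removed upon exit):
    >>> with swallow_outputs() as cmo:
    ...     print('mighty')
    ...     assert cmo.out == 'mighty\\n'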
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
                sys.stdout.write(sep.join(map(str, args)) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
            # must be some other file object -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 clearly requires one mode or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
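    Examples
    --------
    An illustrative call:
    >>> updated({'a': 1}, {'b': 2})
    {'a': 1, 'b': 2}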
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Should not happen, but maybe someone
                        # evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows temporarily changing the directory
    to the given path
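    Examples
    --------
    A sketch of typical use (not executed here since it would change the CWD):
    >>> with chpwd('/tmp'):  # doctest: +SKIP
    ...     pass  # do something in /tmp; previous directory restored on exit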
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under the topdir, and
    the absolute path to the topdir otherwise. If `pwd` is not specified - the
    current directory is assumed
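    Examples
    --------
    An illustration assuming POSIX-style paths (hence not executed as a test):
    >>> get_path_prefix('/a/b', '/a')  # doctest: +SKIP
    'b'
    >>> get_path_prefix('/a/b', '/c')  # doctest: +SKIP
    '/a/b'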
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
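    Examples
    --------
    An illustration assuming POSIX-style paths (hence not executed as a test):
    >>> path_is_subpath('/a/b', '/a')  # doctest: +SKIP
    True
    >>> path_is_subpath('/a', '/a')  # doctest: +SKIP
    False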
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
    """Returns whether at a given path there is information about an annex
    It is just a thin wrapper around GitRepo.is_with_annex() classmethod
    which also checks for `path` to exist first.
    This includes actually present annexes, but also uninitialized ones, or
    even the presence of a remote annex branch.
    """
    from os.path import exists
    if not exists(path):
        lgr.debug("No annex: test path {0} doesn't exist".format(path))
        return False
    from datalad.support.gitrepo import GitRepo
    return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is the value yielded by this
        context manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
        filename_ = filename[:-lsuffix] if lsuffix else filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly connected
      an empty list is returned), or None when no trace to the target can be found,
      or start and end are identical.
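    Examples
    --------
    A couple of illustrative calls:
    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2)], 1, 2)
    []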
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
# we got nothing yet, and this edges is not matching the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if IOError or any other exception
    is thrown there while an except statement mentions WindowsError, a
    NameError would result.
    Also see gh-2533.
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
rmtree defined above does more and ideally should not itself be decorated
since a recursive definition and does checks for open files inside etc -
might be too runtime expensive
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
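    Examples
    --------
    A couple of illustrative calls:
    >>> slash_join('http://example.com/', '/path')
    'http://example.com/path'
    >>> slash_join(None, 'path')
    'path'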
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
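    Examples
    --------
    An illustrative call:
    >>> SequenceFormatter().format("{}", ['a', 'b'])
    'a b'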
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
    If load is itself a tuple (or list, or dict) -- that would create either a
    subtree, or an archive with that content if name ends with .tar.gz, .tar,
    or .zip, and place it into the tree
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
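    Examples
    --------
    An illustrative call (same result on both platform branches):
    >>> split_cmdline('a b "c d"')
    ['a', 'b', 'c d']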
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # `platform == 0` denoted the windows mode in the original snippet;
            # we are on the windows-only code path here (see the early return
            # above), so apply the replacement unconditionally
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
    assume being able to write to tmpfile, and also should not import a whole lot
    from datalad's test machinery. Finally, we want to know whether we can create a
    symlink at a specific location, not just somewhere. Therefore use an
    arbitrary path to test-build a symlink and delete it afterwards. A suitable
    location can therefore be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex() | 1,773 | 1,787 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
      New home path, in the OS-native form
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
        elif value_repr.startswith('<') and value_repr.endswith('>') \
                and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each of length up to 4 characters and starting
    with a letter (not a digit), so we could get rid of .tar.gz etc
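    Examples
    --------
    For instance:
    >>> file_basename('data.tar.gz')
    'data'
    >>> file_basename('data.tar.gz', return_ext=True)
    ('data', 'tar.gz')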
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
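    Examples
    --------
    For instance:
    >>> ensure_list('abc')
    ['abc']
    >>> ensure_list((1, 2))
    [1, 2]
    >>> ensure_list(None)
    []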
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
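    Examples
    --------
    For instance:
    >>> ensure_list_from_str('a\\nb')
    ['a', 'b']
    >>> ensure_list_from_str('') is None
    True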
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
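    Examples
    --------
    For instance (note that values remain strings):
    >>> ensure_dict_from_str('a=1\\nb=2') == {'a': '1', 'b': '2'}
    True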
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
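    Examples
    --------
    For instance:
    >>> ensure_bool('yes')
    True
    >>> ensure_bool('off')
    False
    >>> ensure_bool('1')
    True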
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
    reverse: bool, optional
      If True, uniqueness is checked in the reverse order, so that later
      duplicates take precedence over earlier ones
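    Examples
    --------
    For instance:
    >>> unique([1, 2, 1, 3])
    [1, 2, 3]
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0])
    [(1, 2)]
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0], reverse=True)
    [(1, 3)]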
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
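    Examples
    --------
    For instance:
    >>> all_same([1, 1, 1])
    True
    >>> all_same([1, 2])
    False
    >>> all_same([])
    False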
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
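    Examples
    --------
    For instance:
    >>> falses, trues = partition(range(4), lambda x: x % 2 == 0)
    >>> list(falses), list(trues)
    ([1, 3], [0, 2])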
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
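    Examples
    --------
    A sketch with made-up file names; the number of chunks depends on the
    platform's CMD_MAX_ARG:
    >>> chunks = list(generate_file_chunks(['f1.dat', 'f2.dat'], cmd='ls'))
    >>> sum(len(chunk) for chunk in chunks)
    2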
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
    # but maybe we decide to go for something like the following to be on the safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
    So the first one would go through the generated items and the 2nd one
    would yield the saved items
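    Examples
    --------
    For instance:
    >>> gen1, gen2 = saved_generator(iter(range(3)))
    >>> list(gen1)
    [0, 1, 2]
    >>> list(gen2)
    [0, 1, 2]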
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
    @wraps(func)
    def _wrap_collect_method_callstats(*args, **kwargs):
        # record the start time upfront so the `finally` clause below always
        # has `t0` defined, even if the bookkeeping before the call raises
        t0 = time()
        try:
            self = args[0]
            stack = traceback.extract_stack()
            caller = stack[-2]
            stack_sig = \
                "{relpath}:{s.name}".format(
                    s=caller, relpath=relpath(caller.filename, toppath))
            sig = (id(self), stack_sig)
            # we will count based on id(self) + wherefrom
            memo[sig][caller.lineno] += 1
            return func(*args, **kwargs)
        finally:
            times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to guarantee the absence of side-effects of
    swallowing into a StringIO, which lacks .fileno.
    print mocking is necessary for some uses where sys.stdout was already bound
    to the original sys.stdout, thus mocking it later had no effect; overriding
    the print function had the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 clearly requires one mode or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
    # TODO: it does store messages but without any formatting, i.e. even without
    # a date/time prefix etc.  IMHO it should preserve formatting in case file_ is
    # set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
    Joins the list of arguments into an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and such functions as `os.chdir` and `os.getcwd` do not use or modify
    it, thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager it allows temporarily changing the directory
    to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns relative path to the topdir, if we are under topdir, and if not
absolute path to topdir. If `pwd` is not specified - current directory
assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
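    Examples
    --------
    For instance (paths are built with op.join to stay platform-agnostic):
    >>> path_is_subpath(op.join('a', 'b'), 'a')
    True
    >>> path_is_subpath('a', 'a')
    False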
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is yielded by the context manager.
      If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
directory would be created if doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the starts and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
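    Examples
    --------
    For instance, for a small tree given by two edges:
    >>> edges = [('A', 'B'), ('B', 'C')]
    >>> get_trace(edges, 'A', 'C')
    ['B']
    >>> get_trace(edges, 'A', 'B')
    []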
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` is pointing to a dataset, `path`
    itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
      Note that if it is exponential, duration should typically be > 1.0
      so that the delay grows with each trial
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
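    Examples
    --------
    A sketch wrapping a flaky callable (all names below are made up):
    >>> attempts = []
    >>> @try_multiple_dec(ntrials=3, duration=0, exceptions=ValueError)
    ... def flaky():
    ...     attempts.append(1)
    ...     if len(attempts) < 2:
    ...         raise ValueError("not yet")
    ...     return "ok"
    >>> flaky()
    'ok'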
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    rmtree defined above does more, and ideally should not itself be decorated,
    since it has a recursive definition and does checks for open files inside,
    etc. -- that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
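    Examples
    --------
    For instance:
    >>> slash_join('http://example.com/', '/some/path')
    'http://example.com/some/path'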
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
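    Examples
    --------
    A sketch using a temporary tab-separated file:
    >>> with make_tempfile(content='a\\tb\\n1\\t2\\n') as fname:
    ...     list(read_csv_lines(fname))
    [{'a': '1', 'b': '2'}]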
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
        ('locale.preferred', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
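    Examples
    --------
    For instance:
    >>> SequenceFormatter(separator=', ').format('{}', ['a', 'b'])
    'a, b'
    >>> SequenceFormatter().format('{x}', x=5)
    '5'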
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
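Example (editor's sketch, hypothetical path):
>>> create_tree('/tmp/demo', {'f1.txt': 'load', 'sub': {'f2.txt': 'more'}})  # doctest: +SKIP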
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
Only the customary symbols ('', 'K', 'M', ...) are supported in this
simplified version; for the original multi-scheme ("customary", "iec", ...)
variant see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used when `arg` is not expected to carry user-provided .format()
placeholders, but might become part of a composite string that is later
passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
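Example (editor's illustration; POSIX and Windows agree for this input):
>>> split_cmdline('git annex add "my file.txt"')
['git', 'annex', 'add', 'my file.txt']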
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
# In CMD a doubled quote within a quoted string is an escaped quote.
# The upstream snippet gated this on its own `platform` argument
# (0 == Windows/CMD); here `platform` referred to the module, so the
# check never fired. This branch only runs on Windows, so apply it.
word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
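Example (editor's sketch, hypothetical paths):
>>> from pathlib import Path
>>> check_symlink_capability(Path('/tmp/lnk'), Path('/tmp/target'))  # doctest: +SKIP
True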
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
get_trace | Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical. | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
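Example (editor's illustration):
>>> def f(a, b=1, *, c=2): pass
>>> getargspec(f, include_kwonlyargs=True)
ArgSpecFake(args=['a', 'b', 'c'], varargs=None, keywords=None, defaults=(1, 2))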
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
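Example (editor's illustration; output depends on the current environ):
>>> get_home_envvars('/tmp/newhome')  # doctest: +SKIP
{'HOME': '/tmp/newhome'}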
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if '.git' not in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not islink(path) and isdir(path):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions, each up to 4 characters long and starting with a
letter (not a digit), so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also the mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
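Example (editor's illustration):
>>> ensure_dict_from_str("a=1;b=2", sep=";")
{'a': '1', 'b': '2'}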
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness is checked in reverse order, so that later duplicates
take precedence over earlier ones
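Examples
--------
Editor's illustration:
>>> unique([1, 2, 1, 3])
[1, 2, 3]
>>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0])
[(1, 'a'), (2, 'b')]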
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1, but more efficient when
working on generators, since it returns False as soon as any difference is
detected, thus possibly avoiding unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
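Example (editor's illustration):
>>> map_items(str, {1: 2, 3: 4})
{'1': '2', '3': '4'}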
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
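Examples
--------
Editor's illustration:
>>> falses, trues = partition([0, 1, 2, 0, 3])
>>> list(falses)
[0, 0]
>>> list(trues)
[1, 2, 3]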
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be a positive number"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
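Example (editor's illustration; actual chunking depends on CMD_MAX_ARG):
>>> chunks = generate_file_chunks(['f1.txt', 'f2.txt'], cmd=['git', 'add'])
>>> list(chunks)  # doctest: +SKIP
[['f1.txt', 'f2.txt']]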
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
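Example (editor's illustration):
>>> g1, g2 = saved_generator(iter(range(3)))
>>> list(g1)
[0, 1, 2]
>>> list(g2)
[0, 1, 2]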
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, this necessity went away.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
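# Editor's sketch of typical @optional_args use; `tagged` and its `tag`
# parameter are hypothetical, not part of this module:
#
#   @optional_args
#   def tagged(f, tag="default"):
#       @wraps(f)
#       def _wrapped(*args, **kwargs):
#           lgr.debug("Calling %s [%s]", f.__name__, tag)
#           return f(*args, **kwargs)
#       return _wrapped
#
# permitting both `@tagged` and `@tagged(tag="custom")`.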
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It may or may not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to the original sys.stdout, thus mocking it later had no effect. Overriding
the print function had the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell, and functions such as `os.chdir` and `os.getcwd` neither use nor
modify it, thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
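Example (editor's sketch, hypothetical path):
>>> with chpwd('/tmp'):  # doctest: +SKIP
...     print(getpwd())
/tmp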
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns relative path to the topdir, if we are under topdir, and if not
absolute path to topdir. If `pwd` is not specified - current directory
assumed
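Example (editor's illustration, POSIX paths):
>>> get_path_prefix('/tmp/ds/subdir', pwd='/tmp/ds')  # doctest: +SKIP
'subdir'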
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
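Examples
--------
Editor's illustration (POSIX paths shown; result is separator-dependent):
>>> path_is_subpath('/a/b/c', '/a/b')  # doctest: +SKIP
True
>>> path_is_subpath('/a/b', '/a/b')  # doctest: +SKIP
False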
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
      All other arguments are passed into the call to tempfile.mk{,d}temp().
      If no 'prefix' argument is provided, one will be constructed using the
      module and function names ('.' replaced with '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
        filename_ = filename[:-lsuffix] if lsuffix else filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
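# Editor's sketch (assumes TIMESTAMP_FMT from datalad.consts renders as
# '%Y-%m-%dT%H:%M:%S%z'):
#
#   get_timestamp_suffix(0)  # -> '-1970-01-01T00:00:00+0000' (epoch, UTC;
#                            #    zone rendering may vary by platform)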
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but `dspath` must
    exist and be a directory.
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
# MASKED: get_trace function (lines 1912-1965)
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can be
    found, but a symlink at `path` points to a dataset, `path` itself will be
    reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
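# Editor's sketch (hypothetical layout where only /ds/.git exists):
#
#   get_dataset_root('/ds/subdir/file.txt')  # -> '/ds'
#   get_dataset_root('subdir/file.txt')      # -> '.' when invoked from /ds
#   get_dataset_root('/no/dataset/here')     # -> None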
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
      Note that if it is 'exponential', `duration` should typically be > 1.0,
      so that the delay grows with every retry.
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
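# Editor's sketch of try_multiple_dec usage (illustrative function name):
#
#   @try_multiple_dec(ntrials=3, duration=0.1, exceptions=OSError)
#   def flaky_rmdir(path):
#       os.rmdir(path)
#
# flaky_rmdir() would then be re-invoked up to 3 times, sleeping 0.1 s between
# attempts -- exactly how the `unlink` and `_rmtree` helpers below are built.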
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more and ideally should not itself be
    decorated, since it is defined recursively and performs checks for open
    files etc. -- that might be too expensive at runtime.
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
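# Editor's sketch:
#
#   slash_join('http://example.com/', '/about')  # -> 'http://example.com/about'
#   slash_join(None, 'about')                    # -> 'about'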
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # open as UTF-8 text (csv handles unicode natively on python 3):
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # make sure each cell is a unicode string:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
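# Editor's sketch (hypothetical file): for a TSV with a header row
# "name<TAB>age" followed by "alice<TAB>33", read_csv_lines(fname) yields
# {'name': 'alice', 'age': '33'}.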
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
        ('locale.preferred', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
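# Editor's sketch: sequences are formatted element-wise and joined
#
#   SequenceFormatter().format("{files}", files=["a", "b"])   # -> 'a b'
#   SequenceFormatter(separator=",").format("{x}", x=(1, 2))  # -> '1,2'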
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
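# Editor's sketch (hypothetical layout): nested dicts become subdirectories,
# strings become file content.
#
#   create_tree('/tmp/demo', {
#       'README': 'hello',
#       'sub': {'data.txt': 'payload'},
#   })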
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
    Convert n bytes into a human readable string based on format.

    Uses the "customary" set of symbols (K, M, G, ...); see
    http://goo.gl/kTQMs for other symbol sets.
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
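# Editor's sketch (POSIX):
#
#   join_cmdline(['echo', 'a b'])  # -> "echo 'a b'"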
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # `platform == 0` in the original recipe meant Windows/cmd quoting
            # rules; this branch only runs on Windows, so apply unconditionally
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()


def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      themselves are not included in the trace, hence if start and end are
      directly connected an empty list is returned), or None when no trace
      to the target can be found, or start and end are identical.
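
    Examples
    --------
    A minimal illustration (the node names are made up for this example):

    >>> edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
    >>> get_trace(edges, 'a', 'd')
    ['b', 'c']
    >>> get_trace(edges, 'a', 'b')  # directly connected
    []
    >>> get_trace(edges, 'd', 'a') is None  # no trace in this direction
    True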
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
    return None


# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
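# Editor's sketch of the shim's output (illustrative function):
#
#   def f(a, b=1, *args, **kwargs):
#       pass
#   getargspec(f)
#   # -> ArgSpecFake(args=['a', 'b'], varargs='args',
#   #                keywords='kwargs', defaults=(1,))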
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
      New home path, in the OS-native "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
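# Editor's sketch:
#
#   @auto_repr
#   class Foo(object):
#       def __init__(self):
#           self.a = 1
#           self._hidden = 2  # attributes starting with '_' are skipped
#
#   repr(Foo())  # -> 'Foo(a=1)'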
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
                       if '.git' not in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
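# Editor's sketch (POSIX):
#
#   is_explicit_path('/abs/path')  # True
#   is_explicit_path('./rel')      # True
#   is_explicit_path('../rel')     # True
#   is_explicit_path('plain')      # False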
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
    if isdir(path) and not islink(path):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
            # shutil fails to remove paths that exceed 260 characters on Windows machines
            # that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
            # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
            path = '\\\\?\\' + path  # (a raw string literal cannot end with a backslash)
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each being a dot followed by a letter or
    underscore (not a digit) and up to 4 more characters, so we could get rid
    of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
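# Editor's sketch:
#
#   file_basename('archive.tar.gz')                   # -> 'archive'
#   file_basename('archive.tar.gz', return_ext=True)  # -> ('archive', 'tar.gz')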
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
        To overcome the absence of os.lutime.
        Works only on Linux and OSX ATM.
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
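# Editor's sketch:
#
#   ensure_dict_from_str('a=1\nb=2')  # -> {'a': '1', 'b': '2'}
#   ensure_dict_from_str('')          # -> None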
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
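# Editor's sketch:
#
#   ensure_bool('yes')  # -> True
#   ensure_bool('off')  # -> False
#   ensure_bool('1')    # -> True (digit strings go through int())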
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in reverse order, so that the later
      occurrences take precedence.
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
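# Editor's sketch:
#
#   unique([1, 2, 1, 3])                       # -> [1, 2, 3]
#   unique([1, 2, 1, 3], reverse=True)         # -> [2, 1, 3] (last occurrences win)
#   unique(['ab', 'ac'], key=lambda s: s[0])   # -> ['ab'] (unique by first char)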
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
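# Editor's sketch:
#
#   all_same([1, 1, 1])  # -> True
#   all_same([1, 2, 1])  # -> False (stops at the first difference)
#   all_same([])         # -> False (an empty sequence is not "all same")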
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
        elements will be partitioned based on whether the return value is
        false or true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
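# Editor's sketch:
#
#   odd, even = partition(range(5), lambda x: x % 2 == 0)
#   list(odd), list(even)  # -> ([1, 3], [0, 2, 4])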
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
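# Editor's sketch:
#
#   gen1, gen2 = saved_generator(iter(range(3)))
#   list(gen1)  # -> [0, 1, 2] (consumes the original, saving the items)
#   list(gen2)  # -> [0, 1, 2] (replays the saved items)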
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
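# Editor's sketch: with @optional_args a decorator can be used both bare and
# parametrized (names below are illustrative):
#
#   @optional_args
#   def verbose(f, level=5):
#       @wraps(f)
#       def _wrap(*args, **kwargs):
#           lgr.log(level, "calling %s", f.__name__)
#           return f(*args, **kwargs)
#       return _wrap
#
#   @verbose            # bare
#   def a(): ...
#
#   @verbose(level=10)  # parametrized
#   def b(): ...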
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
    def _wrap_collect_method_callstats(*args, **kwargs):
        t0 = time()  # assign before `try` so the `finally` below cannot NameError
        try:
            self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
            times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to avoid the side-effects of swallowing
    into a StringIO, which lacks .fileno.
    Mocking print is necessary for some uses where print was already bound
    to the original sys.stdout, so that mocking sys.stdout later would have
    no effect. Overriding the print function has the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
                sys.stdout.write(sep.join(map(str, args)) + end)  # print() stringifies its args
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
            # must be some other file object -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
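# A minimal usage sketch for swallow_outputs:
#
#     with swallow_outputs() as cmo:
#         print("very silent")
#         assert cmo.out == "very silent\n"
#         sys.stderr.write("grumpy")
#         assert cmo.err == "grumpy"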
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 clearly requires one mode or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
    # date/time prefix etc. IMHO it should preserve formatting in case file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
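# A minimal usage sketch for swallow_logs (assert_logged relies on datalad's
# test utilities being importable):
#
#     with swallow_logs(new_level=logging.DEBUG) as cml:
#         lgr.debug("mysterious debug")
#         assert "mysterious debug" in cml.out
#         cml.assert_logged("mysterious debug", level="DEBUG")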
# TODO: Maybe merge with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
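# A minimal usage sketch for disable_logger:
#
#     with disable_logger():
#         lgr.error("this is rejected by all 'datalad' handlers")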
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and such functions as `os.chdir` and `os.getcwd` neither use nor
    modify it; thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
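            # intentionally matches both "No such file" and "no such file"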
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but maybe someone
                        # evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager it allows one to temporarily change directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
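# A minimal usage sketch for chpwd as a context manager (hypothetical path):
#
#     with chpwd('subdir', mkdir=True):   # relative to getpwd()
#         ...                             # getpwd() now ends with 'subdir'
#     # previous working directory (and $PWD) is restored on exit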
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns relative path to the topdir, if we are under topdir, and if not
absolute path to topdir. If `pwd` is not specified - current directory
assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
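# POSIX-flavored examples of get_path_prefix behavior:
#
#     get_path_prefix('/a/b/c', pwd='/a/b')   # -> 'c'     (under pwd)
#     get_path_prefix('/a/b', pwd='/a/b/c')   # -> '..'    (above pwd)
#     get_path_prefix('/d', pwd='/a/b')       # -> '/d'    (unrelated: absolute)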
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
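# Quick reference for the two predicates above (POSIX separators):
#
#     path_startswith('/a/b', '/a')     # True
#     path_startswith('/a/b', '/a/b')   # True  -- a path "starts with" itself
#     path_is_subpath('/a/b', '/a')     # True
#     path_is_subpath('/a/b', '/a/b')   # False -- equal paths are not subpaths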
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded by this context
        manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
        filename_ = filename[:-lsuffix] if lsuffix else filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who has stepped into this trap already once ;-)
            lgr.warning("It is unlikely that it was intended to remove all"
                        " files matching %r. Skipping", filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
    primarily to be used for generation of log file names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory will be created if it doesn't exist, but dspath must exist
    and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
        Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
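# A small worked example for get_trace:
#
#     edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
#     get_trace(edges, 'a', 'd')   # -> ['b', 'c']
#     get_trace(edges, 'a', 'b')   # -> []    (directly connected)
#     get_trace(edges, 'd', 'a')   # -> None  (no trace in this direction)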
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can be
    found, and a symlink at `path` is pointing to a dataset, `path` itself
    will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
    The main purpose is to decorate functions dealing with removal of
    files/directories, which might need a few retries to work correctly on
    Windows, since it takes its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
        so it grows with higher powers
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
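# A minimal usage sketch for try_multiple_dec with explicit parameters:
#
#     @try_multiple_dec(ntrials=3, duration=0.5, exceptions=OSError)
#     def remove_stubborn(path):
#         os.unlink(path)
#
# Plain `@try_multiple_dec` (as used below) works too, using the defaults.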
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
    On Windows boxes there is evidence for a latency of more than a second
    until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if an `except` statement mentions
    WindowsError there, a NameError would be raised whenever IOError or any
    other exception is thrown.
    Also see gh-2533.
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more and ideally should not itself be
    decorated, since it has a recursive definition and performs checks for
    open files inside, etc. -- that might be too expensive at runtime.
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
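# slash_join examples:
#
#     slash_join('http://example.com/', '/api')   # -> 'http://example.com/api'
#     slash_join(None, 'api')                     # -> 'api'
#     slash_join('base', None)                    # -> 'base'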
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
        negative, the full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # open with an explicit UTF-8 encoding; cells are passed through ensure_unicode below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
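# A minimal usage sketch for read_csv_lines (hypothetical file and column
# names, purely for illustration):
#
#     for record in read_csv_lines('participants.tsv'):
#         print(record['participant_id'])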
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
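# A minimal usage sketch for SequenceFormatter:
#
#     fmt = SequenceFormatter(separator=", ")
#     fmt.format("{}", ["a", "b", "c"])   # -> 'a, b, c'
#     fmt.format("{}", "plain")           # -> 'plain'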
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
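# For example (assuming difflib finds 'install' close enough to the typo):
#
#     get_suggestions_msg('instal', ['install', 'status'], sep=' ')
#     # -> 'Did you mean any of these? install\n'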
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    Only the "customary" symbols (K, M, G, ...) are supported in this
    simplified version; for the original see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases when `arg` is not expected to have user-provided
    .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
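# Platform-dependent quoting examples:
#
#     join_cmdline(['ls', 'my file'])    # POSIX   -> "ls 'my file'"
#     quote_cmdlinearg('say "hi"')       # Windows -> '"say ""hi"""'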
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # the original recipe used `platform == 0` to mark Windows; comparing
            # the `platform` module to 0 is always False, and this code path only
            # runs on Windows anyway, so apply the Windows unescaping directly
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
str or None | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strip up to 2 short extensions, each starting with a letter or underscore
    (not a digit), so we could get rid of .tar.gz etc
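    Examples
    --------
    Illustrative invocations (names are arbitrary):
    >>> file_basename('archive.tar.gz')
    'archive'
    >>> file_basename('archive.tar.gz', return_ext=True)
    ('archive', 'tar.gz')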
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
        # trust no one - adjust also the mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
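    Examples
    --------
    Illustrative invocations (using a custom separator for brevity):
    >>> ensure_list_from_str("a b", sep=" ")
    ['a', 'b']
    >>> ensure_list_from_str("") is None
    True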
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
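    Examples
    --------
    An illustrative invocation (using a custom separator for brevity):
    >>> ensure_dict_from_str("a=1 b=2", sep=" ") == {'a': '1', 'b': '2'}
    True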
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
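    Examples
    --------
    >>> ensure_bytes('abc')
    b'abc'
    >>> ensure_bytes(b'abc')
    b'abc'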
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
      A value between 0 and 1; if the encoding is guessed with a confidence
      lower than the one specified, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
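    Examples
    --------
    >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
    (True, False, True)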
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in the reverse order, so that the later
      entries take precedence
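    Examples
    --------
    Illustrative invocations (values are arbitrary):
    >>> unique([1, 2, 1, 3])
    [1, 2, 3]
    >>> unique([1, 2, 1, 3], reverse=True)
    [2, 1, 3]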
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
    Identical to a check like len(set(items)) == 1, but should be more
    efficient while working on generators, since it would return False as
    soon as any difference is detected, thus possibly avoiding unnecessary
    evaluations
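    Examples
    --------
    >>> all_same([1, 1, 1])
    True
    >>> all_same([])
    False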
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
    Initial use case - apply_recursive(url_fragment, ensure_unicode)
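    Examples
    --------
    >>> map_items(str, {1: 2})
    {'1': '2'}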
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
      elements will be partitioned based on whether the return value is
      false or true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
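    Examples
    --------
    An illustrative partition by the truthiness of x % 2:
    >>> false_items, true_items = partition([0, 1, 2, 3], lambda x: x % 2)
    >>> list(false_items), list(true_items)
    ([0, 2], [1, 3])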
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
    # TODO: additional treatment for "too many arguments"? although as
    # https://github.com/datalad/datalad/issues/1883#issuecomment-436272758
    # shows there seems to be no hardcoded limit on # of arguments,
    # but maybe we decide to go for smth like the following to be on the safe side
    # chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
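    Examples
    --------
    >>> gen1, gen2 = saved_generator(iter(range(3)))
    >>> list(gen1)
    [0, 1, 2]
    >>> list(gen2)
    [0, 1, 2]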
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
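    whereas a call with (hypothetical) keyword arguments returns a decorator
    to be applied, e.g.::
        @my_decorator(some_option=True)
        def function(): pass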
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
    stdout is available as cm.out and stderr as cm.err, where cm is the
    yielded context manager.
    Internally uses temporary files to guarantee absence of side-effects of
    swallowing into a StringIO, which lacks .fileno.
    Mocking print is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, so mocking it later had no effect.
    Overriding the print function had the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
    # date/time prefix etc. IMHO it should preserve formatting in case file_
    # is set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
    Joins the list of arguments into an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
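    Examples
    --------
    >>> updated({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
    True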
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and such functions as `os.chdir` and `os.getcwd` do not use or
    modify it, thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Should not happen, but maybe someone
                        # evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under topdir, and the
    absolute path to topdir otherwise. If `pwd` is not specified - the
    current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
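    Examples
    --------
    Illustrative invocations (op.join keeps them platform-agnostic):
    >>> path_is_subpath(op.join('a', 'b'), 'a')
    True
    >>> path_is_subpath('a', 'a')
    False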
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is yielded. If no 'prefix'
      argument is provided, it will be constructed using module and
      function names ('.' replaced with '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The logs directory would be created if it doesn't exist, but dspath must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly
      connected an empty list is returned), or None when no trace to the
      target can be found, or start and end are identical.
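    Examples
    --------
    An illustrative tree with two edges:
    >>> get_trace([('A', 'B'), ('B', 'C')], 'A', 'C')
    ['B']
    >>> get_trace([('A', 'B'), ('B', 'C')], 'A', 'B')
    []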
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
        # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
    """Return the root of an existent dataset containing a given path
    The root path is returned in the same absolute or relative form
    as the input argument. If no associated dataset exists, or the
    input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` is pointing to a dataset, `path`
    itself will be reported as the root.
    Parameters
    ----------
    path : Path-like
    Returns
    -------
    str or None
    """
    path = str(path)
    suffix = '.git'
    altered = None
    if islink(path) or not isdir(path):
        altered = path
        path = dirname(path)
    apath = abspath(path)
    # while we can still go up
    while split(apath)[1]:
        if exists(op.join(path, suffix)):
            return path
        # new test path in the format we got it
        path = normpath(op.join(path, os.pardir))
        # no luck, next round
        apath = abspath(path)
    # if we applied dirname() at the top, we give it another go with
    # the actual path, if it was itself a symlink, it could be the
    # top-level dataset itself
    if altered and exists(op.join(altered, suffix)):
        return altered
    return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
    The main purpose is to decorate functions dealing with removal of
    files/directories, which might need a few attempts to work correctly on
    Windows, which takes its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
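    Examples
    --------
    A sketch with a hypothetical flaky callable which succeeds on the second
    attempt:
    >>> calls = []
    >>> @try_multiple_dec(ntrials=3, duration=0, exceptions=ValueError)
    ... def flaky():
    ...     calls.append(1)
    ...     if len(calls) < 2:
    ...         raise ValueError("transient failure")
    ...     return "ok"
    >>> flaky()
    'ok'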
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
    On Windows boxes there is evidence for a latency of more than a second
    until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if an except statement mentions
    WindowsError there and IOError or any other exception is thrown, it
    would result in a NameError;
    also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more, and ideally should not itself be
    decorated, since it has a recursive definition and does checks for open
    files inside etc. - that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
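    Examples
    --------
    >>> slash_join('https://example.com/', '/path')
    'https://example.com/path'
    >>> slash_join(None, 'path')
    'path'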
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file is opened with an explicit utf-8 encoding; construct the reader:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # ensure all cells are unicode strings, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
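    Examples
    --------
    >>> SequenceFormatter(separator=", ").format("{0}", ["a", "b"])
    'a, b'
    >>> SequenceFormatter().format("{0}", "plain")
    'plain'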
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
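    For example, an illustrative call (hypothetical paths) creating a file
    and a subdirectory with a nested file::
        create_tree('/tmp/demo', {'file.txt': 'content',
                                  'subdir': {'nested.txt': 'more'}})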
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    This simplified version uses a single, customary set of symbols
    (K, M, G, ...); see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases when arg is not expected to have user-provided
    .format() placeholders, but 'arg' might become a part
    of a composite passed to .format(), e.g. via 'Run'
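    Examples
    --------
    >>> guard_for_format('{path}')
    '{{path}}'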
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # NB: the upstream snippet used an int `platform` flag where 0
            # meant Windows/CMD; this branch is reached only on Windows (see
            # the early return above), so unescape doubled quotes here
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
    However, for use in a datalad command context, we shouldn't assume being
    able to write to tmpfile, and also shouldn't import a whole lot from
    datalad's test machinery. Finally, we want to know whether we can create
    a symlink at a specific location, not just somewhere. Therefore we use
    an arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory, its
the root dataset containing its parent directory will be reported.
If none can be found, at a symlink at `path` is pointing to a
dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None | 1,968 | 2,009 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
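# A minimal usage sketch (illustrative only; `f` is a hypothetical function,
# not part of this module):
#
#   >>> def f(a, b=1, *args, c=2, **kw):
#   ...     pass
#   >>> getargspec(f, include_kwonlyargs=True)
#   ArgSpecFake(args=['a', 'b', 'c'], varargs='args', keywords='kw', defaults=(1, 2))
#
# Without include_kwonlyargs=True the same call would raise ValueError,
# since `c` is keyword-only.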
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
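# A minimal usage sketch (illustrative; assumes a POSIX environment where,
# of the relevant variables, only HOME is present in os.environ):
#
#   >>> get_home_envvars('/tmp/newhome')
#   {'HOME': '/tmp/newhome'}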
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each up to 4 characters long and starting with
    a letter (not a digit), so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
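# A minimal usage sketch (illustrative):
#
#   >>> file_basename('data/archive.tar.gz')
#   'archive'
#   >>> file_basename('data/archive.tar.gz', return_ext=True)
#   ('archive', 'tar.gz')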
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
        To overcome the absence of os.lutime.
        Works only on Linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
            # trust no one - adjust also the mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
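# A minimal usage sketch (illustrative):
#
#   >>> ensure_dict_from_str('a=1\nb=2')
#   {'a': '1', 'b': '2'}
#
# An empty input returns None, and a repeated key raises ValueError.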
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
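# A minimal usage sketch (illustrative):
#
#   >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
#   (True, False, True)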
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in reverse order, so that the later
      entries take precedence
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
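# A minimal usage sketch (illustrative):
#
#   >>> unique([1, 2, 1, 3])
#   [1, 2, 3]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0], reverse=True)
#   [(2, 'b'), (1, 'c')]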
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
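# A minimal usage sketch (illustrative; note that an empty iterable gives False):
#
#   >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
#   (True, False, False)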
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
        elements will be partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
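# A minimal usage sketch (illustrative):
#
#   >>> odd, even = partition(range(6), lambda x: x % 2 == 0)
#   >>> list(odd), list(even)
#   ([1, 3, 5], [0, 2, 4])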
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
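# A minimal usage sketch (illustrative):
#
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]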
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
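# A minimal usage sketch (illustrative; the replaying generator can only
# yield items the first generator has already produced):
#
#   >>> gen1, gen2 = saved_generator(iter(range(3)))
#   >>> list(gen1)
#   [0, 1, 2]
#   >>> list(gen2)
#   [0, 1, 2]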
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
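# A minimal usage sketch (illustrative; `verbose` is a hypothetical decorator,
# not part of this module):
#
#   @optional_args
#   def verbose(f, prefix=''):
#       @wraps(f)
#       def _wrapped(*args, **kwargs):
#           print(prefix, f.__name__)
#           return f(*args, **kwargs)
#       return _wrapped
#
# after which both the plain `@verbose` and the parametrized
# `@verbose(prefix='run:')` forms decorate a function correctly.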
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to avoid the side-effects of swallowing
    into a StringIO, which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell; functions such as `os.chdir` and `os.getcwd` neither use nor modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under topdir, and
    otherwise the absolute path to topdir. If `pwd` is not specified - current directory
assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
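# A minimal usage sketch (illustrative, POSIX paths):
#
#   >>> get_path_prefix('/a/b', pwd='/a')   # under topdir -> relative
#   'b'
#   >>> get_path_prefix('/c', pwd='/a')     # elsewhere -> absolute
#   '/c'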
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
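# A minimal usage sketch (illustrative, POSIX paths):
#
#   >>> path_startswith('/a/b', '/a'), path_is_subpath('/a/b', '/a')
#   (True, True)
#   >>> path_startswith('/a', '/a'), path_is_subpath('/a', '/a')
#   (True, False)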
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is yielded by the context
      manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
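# A minimal usage sketch (illustrative; the exact value depends on
# TIMESTAMP_FMT from datalad.consts, here assumed to be '%Y%m%dT%H%M%S'):
#
#   >>> get_timestamp_suffix(0)  # doctest: +SKIP
#   '-19700101T000000'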
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
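# A minimal usage sketch (illustrative):
#
#   >>> get_trace([('A', 'B'), ('B', 'C'), ('C', 'D')], 'A', 'D')
#   ['B', 'C']
#   >>> get_trace([('A', 'B')], 'A', 'B')   # directly connected
#   []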
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
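# A minimal usage sketch (illustrative; `flaky_fetch` is a hypothetical
# function that may raise OSError):
#
#   try_multiple(3, OSError, 2, flaky_fetch, 'some://url')
#
# would call flaky_fetch('some://url') up to 3 times, sleeping 2 and then 4
# seconds after the first and second failures, and re-raising on the third.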
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    rmtree defined above does more and ideally should not itself be decorated,
    since it is defined recursively and does checks for open files inside etc -
    that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
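# A minimal usage sketch (illustrative):
#
#   >>> slash_join('http://example.com/', '/api')
#   'http://example.com/api'
#   >>> slash_join(None, 'api')
#   'api'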
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
    kw = dict(encoding='utf-8')
    with open(fname, 'r', **kw) as tsvfile:
        # read the file as UTF-8 and hand str rows to csv.reader
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # make sure every cell is a proper unicode str
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
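# A hypothetical usage sketch: each yielded record is a dict keyed by the
# header row, e.g. for a TSV with columns "name" and "size" one could do
# (the 'size' column here is an assumption for illustration):
def _example_sum_sizes(fname):
    return sum(int(rec['size']) for rec in read_csv_lines(fname))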
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclases.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
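# A minimal illustration (hypothetical): list-like values are joined with the
# separator, while scalar values are formatted as usual.
def _example_sequence_format():
    fmt = SequenceFormatter(separator=", ")
    # returns 'files: a.txt, b.txt'
    return fmt.format("files: {}", ["a.txt", "b.txt"])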
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
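# A hypothetical sketch of a tree specification: strings become file content,
# nested dicts become subdirectories, and File() entries carry extra settings
# such as executability.
def _example_create_demo_tree(path):
    create_tree(path, {
        'README': 'hello\n',
        File('run.sh', executable=True): '#!/bin/sh\necho hi\n',
        'subdir': {'data.txt': '1 2 3\n'},
    })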
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
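# For illustration (hypothetical helper): surface suggestions for a mistyped
# command name, e.g. 'staus' would suggest 'status'.
def _example_suggest(value):
    return get_suggestions_msg(value, known=['status', 'diff', 'save'])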
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    Symbols used are the customary ones (K, M, G, ...), see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where `arg` is not expected to contain user-provided
    .format() placeholders, but might become part of a composite string
    passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
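# For illustration: join_cmdline(['echo', 'a b']) gives 'echo "a b"' on
# Windows and "echo 'a b'" elsewhere (via shlex.quote).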
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
        elif qs:
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # we only get here on Windows (POSIX was handled via shlex above),
            # where a doubled quote is the escape for a literal quote
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
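# A minimal illustration: on POSIX this defers to shlex, so
# split_cmdline('datalad save -m "a message"') returns
# ['datalad', 'save', '-m', 'a message']; on Windows the lexer above
# additionally unescapes doubled quotes.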
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
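# A hypothetical usage sketch: probe a candidate location before deciding on
# a symlink-based layout; the probe names below are made up for illustration.
def _example_can_symlink(dspath):
    base = Path(dspath) / '.symlink_probe'
    return check_symlink_capability(base.with_suffix('.link'),
                                    base.with_suffix('.target'))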
|
try_multiple_dec | Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories,
which might need a few retries to succeed on Windows, where the OS can take
its time to release files/directories.
Parameters
----------
ntrials: int, optional
  Number of times to attempt the call before giving up.
duration: float, optional
  Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
  If 'exponential', the sleep time is `duration` raised to the power of the
  trial number (starting at 1), so `duration` should typically be > 1.0
  for the delay to actually grow.
exceptions: Exception or tuple of Exceptions, optional
  Exception class or a tuple of classes on which to retry.
exceptions_filter: callable, optional
  If provided, this function will be called with the caught exception
  instance. If it returns True, we retry; if it returns False, the
  exception is re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug). | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
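# A minimal illustration (hypothetical): report the args of a function,
# including keyword-only ones when requested.
def _example_getargspec():
    def f(a, b=1, *, c=2):
        return a + b + c
    # -> ArgSpecFake(args=['a', 'b', 'c'], varargs=None, keywords=None,
    #                defaults=(1, 2))
    return getargspec(f, include_kwonlyargs=True)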
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
    if isdir(path) and not islink(path):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
        # anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
        path = '\\\\?\\' + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each a few characters long and starting with
    a letter (not a digit), so we can get rid of .tar.gz etc.
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
        To overcome the absence of os.lutime.
        Works only on Linux and OSX ATM.
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
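# For illustration: ensure_dict_from_str('a=1\nb=2') gives {'a': '1', 'b': '2'},
# an empty input gives None, and a dict is passed through unchanged.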
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness checked in the reverse order, so that the later ones
will take the order
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
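# A minimal illustration (hypothetical): order-preserving deduplication,
# optionally keyed and optionally keeping the last occurrence instead.
def _example_unique():
    assert unique([3, 1, 3, 2]) == [3, 1, 2]
    assert unique([3, 1, 3, 2], reverse=True) == [1, 3, 2]
    return unique(['a.txt', 'A.TXT'], key=str.lower)  # -> ['a.txt']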
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
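# A minimal illustration (hypothetical): split paths into absent and present
# ones in a single pass; note that both returned values are lazy generators.
def _example_partition(paths):
    absent, present = partition(paths, op.exists)
    return list(absent), list(present)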
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
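# A hypothetical sketch: invoke a command over many files without exceeding
# the platform command line length limit (`run` stands in for any executor).
def _example_run_chunked(run, files):
    cmd = ['git', 'annex', 'add']
    for chunk in generate_file_chunks(files, cmd):
        run(cmd + chunk)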
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
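# A minimal illustration (hypothetical): consume a generator once while
# keeping a cheap replay for a second pass over the same items.
def _example_saved_generator():
    first, replay = saved_generator(iter(range(3)))
    assert list(first) == [0, 1, 2]
    assert list(replay) == [0, 1, 2]  # replays saved items, no regeneration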
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
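# A minimal illustration (hypothetical): one decorator usable both bare and
# with keyword arguments, thanks to optional_args.
@optional_args
def _example_tag(f, label="default"):
    @wraps(f)
    def _wrapped(*args, **kwargs):
        lgr.log(5, "calling %s tagged %r", f.__name__, label)
        return f(*args, **kwargs)
    return _wrapped
# Now both `@_example_tag` and `@_example_tag(label="io")` work.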
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to avoid the side-effects of swallowing
    into a StringIO, which lacks .fileno.
    Mocking print() is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, so mocking sys.stdout later had no
    effect; overriding the print function had the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
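# A minimal usage sketch (hypothetical): capture the stdout/stderr of a noisy
# call for inspection instead of letting it reach the terminal.
def _example_swallow(noisy_fn):
    with swallow_outputs() as cmo:
        noisy_fn()
        return cmo.out, cmo.err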
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
    # repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # must not happen, unless someone evil purged PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change
    the directory to the given path
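    Examples
    --------
    Sketch of use as a context manager (the path is purely illustrative):
    >>> with chpwd('/tmp'):  # doctest: +SKIP
    ...     pass  # getpwd() and $PWD point to /tmp here; restored on exit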
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path from `pwd` to `path` whenever one of the two
    is a prefix of the other; otherwise returns the absolute path to `path`.
    If `pwd` is not specified, the current directory is assumed.
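    Examples
    --------
    Illustration with POSIX paths; skipped as a test since the result is
    platform-dependent:
    >>> get_path_prefix('/tmp/ds/sub', pwd='/tmp/ds')  # doctest: +SKIP
    'sub'
    >>> get_path_prefix('/tmp/ds', pwd='/home')  # doctest: +SKIP
    '/tmp/ds'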
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
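    Examples
    --------
    Illustration with POSIX paths; skipped as a test since path separators
    are platform-dependent:
    >>> path_is_subpath('/a/b/c', '/a/b')  # doctest: +SKIP
    True
    >>> path_is_subpath('/a/b', '/a/b')  # doctest: +SKIP
    False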
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
      All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is then yielded by this context
      manager. If no 'prefix' argument is provided, it will be
      constructed using module and function names ('.' replaced with
      '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
    filename = (tempfile.mkdtemp if mkdir else tempfile.mktemp)(**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
        filename_ = filename[:-lsuffix] if lsuffix else filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
            lgr.warning("It is unlikely that it was intended to remove all"
                        " files matching %r. Skipping", filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
    primarily to be used for generation of log file names
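    Examples
    --------
    The exact value depends on TIMESTAMP_FMT; for illustration only:
    >>> get_timestamp_suffix(0)  # doctest: +SKIP
    '-1970-01-01T00:00:00+0000'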
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but `dspath` must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly
      connected an empty list is returned), or None when no trace to the
      target can be found, or start and end are identical.
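    Examples
    --------
    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2), (2, 3)], 1, 4) is None
    True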
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` points to a dataset, `path` itself
    will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
        f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
        exceptions_filter=None,
        logger=None,
):
    """Decorator to try function multiple times.
    Main purpose is to decorate functions dealing with removal of files/directories
    which might need a few seconds to work correctly on Windows, which takes
    its time to release files/directories.
    Parameters
    ----------
    ntrials: int, optional
    duration: float, optional
      Seconds to sleep before retrying.
    increment_type: {None, 'exponential'}
      Note that if it is exponential, duration should typically be > 1.0
      so it grows with higher power
    exceptions: Exception or tuple of Exceptions, optional
      Exception or a tuple of multiple exceptions, on which to retry
    exceptions_filter: callable, optional
      If provided, this function will be called with a caught exception
      instance. If function returns True - we will re-try, if False - exception
      will be re-raised without retrying.
    logger: callable, optional
      Logger to log upon failure. If not provided, will use stock logger
      at the level of 5 (heavy debug).
    """
    if not exceptions:
        exceptions = (OSError, WindowsError, PermissionError) \
            if on_windows else OSError
    if not ntrials:
        # Life goes fast on proper systems, no need to delay it much
        ntrials = 100 if on_windows else 10
    if logger is None:
        def logger(*args, **kwargs):
            return lgr.log(5, *args, **kwargs)
    assert increment_type in {None, 'exponential'}
    @wraps(f)
    def _wrap_try_multiple_dec(*args, **kwargs):
        t = duration
        for trial in range(ntrials):
            try:
                return f(*args, **kwargs)
            except exceptions as exc:
                if exceptions_filter and not exceptions_filter(exc):
                    raise
                if trial < ntrials - 1:
                    if increment_type == 'exponential':
                        t = duration ** (trial + 1)
                    logger(
                        "Caught %s on trial #%d. Sleeping %f and retrying",
                        CapturedException(exc), trial, t)
                    sleep(t)
                else:
                    raise
    return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if IOError or any other exception
    is thrown there, an except statement mentioning WindowsError would itself
    fail with a NameError.
    Also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more and ideally should not itself be
    decorated, since it is defined recursively and performs checks for open
    files inside etc. - that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
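    Examples
    --------
    >>> slash_join('http://example.com/', '/about')
    'http://example.com/about'
    >>> slash_join(None, 'about')
    'about'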
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # text mode with explicit UTF-8 encoding; Python 3's csv handles Unicode:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # pass each cell through ensure_unicode as a safety net:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
    import locale  # sys is already imported at module level
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
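    Examples
    --------
    >>> SequenceFormatter().format("{0}", ["a", "b"])
    'a b'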
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
    If `load` is a tuple, list, or dict itself, a subtree is created, unless
    `name` ends with .tar.gz, .tar, or .zip, in which case an archive with
    that content is created and placed into the tree instead
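    Examples
    --------
    A sketch only (the target path is purely illustrative), hence skipped:
    >>> create_tree('/tmp/demo', {'file.txt': 'content',
    ...                           'subdir': {'nested.txt': 'more'}})  # doctest: +SKIP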
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    Symbols are hardcoded to the customary ones ('B', 'K', 'M', ...) in this
    simplified variant; see http://goo.gl/kTQMs for richer alternatives.
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # in the original snippet `platform == 0` denoted Windows/CMD;
            # this branch is Windows-only, so apply the CMD rule that a
            # doubled "" within quotes is a literal quote character
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
    datalad's test machinery. Finally, we want to know whether we can create a
    symlink at a specific location, not just somewhere. Therefore use an
    arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec | 2,027 | 2,087 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness checked in the reverse order, so that the later ones
will take the order
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
        1,  # should at least be 1. If it blows up then -- not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
    # TODO: additional treatment for "too many arguments"? although
    # as https://github.com/datalad/datalad/issues/1883#issuecomment-436272758
    # shows there seems to be no hardcoded limit on the # of arguments,
    # but maybe we would decide to go for something like the following to be
    # on the safe side
    # chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
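# Worked example of the chunk-size arithmetic above (illustrative numbers):
# with cmd=['git', 'annex', 'add'] the command budget is
# (3+3) + (5+3) + (3+3) = 20 characters, and with a longest filename of 57
# characters each file is budgeted 57+3 = 60 characters, so a hypothetical
# CMD_MAX_ARG of 65536 would yield (65536 - 20 - 4) // 60 == 1091 files per
# chunk.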
#
# Generator helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
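# Example (illustrative): the second generator can only replay items the
# first one has already produced:
#   >>> g1, g2 = saved_generator(iter([1, 2, 3]))
#   >>> list(g1)
#   [1, 2, 3]
#   >>> list(g2)
#   [1, 2, 3]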
#
# Decorators
#
# Originally better_wraps was created to provide a `wrapt`-based, instead of
# `functools.wraps`, implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
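# Example (illustrative): the same decorator can be used with or without
# arguments:
#   >>> @optional_args
#   ... def tagged(f, tag='plain'):
#   ...     @wraps(f)
#   ...     def _wrap(*args, **kwargs):
#   ...         return tag, f(*args, **kwargs)
#   ...     return _wrap
#   >>> @tagged
#   ... def one(): return 1
#   >>> one()
#   ('plain', 1)
#   >>> @tagged(tag='loud')
#   ... def two(): return 2
#   >>> two()
#   ('loud', 2)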
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
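# Example (illustrative; a 'dir' entry would additionally appear whenever
# TMPDIR is set in the environment):
#   >>> get_tempfile_kwargs({}, prefix='zip')['prefix']
#   'datalad_temp_zip_'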
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
      It might or might not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
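# Example (illustrative; assumes DATALAD_ALLOW_FAIL is not set):
#   >>> @never_fail
#   ... def boom():
#   ...     raise ValueError("nope")
#   >>> boom() is None  # the exception is swallowed, only a warning is logged
#   True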
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to avoid the side-effects of swallowing
    into a StringIO, which lacks .fileno.

    Mocking of print is necessary for some uses where print was already bound
    to the original sys.stdout, so mocking sys.stdout later had no effect.
    Overriding the print function had the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
            # must be some other file object -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
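# Example (illustrative):
#   >>> with swallow_outputs() as cmo:
#   ...     print('some output')
#   ...     assert cmo.out == 'some output\n'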
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
                # PY3 clearly requires one mode or the other; race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
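# Example (illustrative; messages are captured as '[LEVEL] message' lines):
#   >>> with swallow_logs(new_level=logging.INFO) as cml:
#   ...     logging.getLogger('datalad').info('badness')
#   ...     assert '[INFO] badness' in cml.out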
# TODO: Maybe merge with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
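# Example (illustrative): unlike dict.update, the input is left intact:
#   >>> d = {'a': 1}
#   >>> updated(d, {'a': 2, 'b': 3})
#   {'a': 2, 'b': 3}
#   >>> d
#   {'a': 1}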
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate this by going through all flyweighted
    # repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and functions such as `os.chdir` and `os.getcwd` in no way use or
    modify it; thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
            # - it seems to be "illegal" to have single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            return cwd  # Must not happen, but maybe someone
            # evil purged PWD from the environment?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want to norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir, if we are under or above topdir,
    and the absolute path to topdir otherwise. If `pwd` is not specified, the
    current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
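# Examples (illustrative, with POSIX paths):
#   >>> get_path_prefix('/a/b', '/a/b')    # benign "here"
#   ''
#   >>> get_path_prefix('/a/b', '/a/b/c')  # pwd below path
#   '..'
#   >>> get_path_prefix('/d/e', '/a/b')    # unrelated -> absolute path
#   '/d/e'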
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
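# Examples (illustrative, with POSIX paths):
#   >>> path_startswith('/a/b', '/a')
#   True
#   >>> path_is_subpath('/a/b', '/a')
#   True
#   >>> path_is_subpath('/a', '/a')  # equal paths are not subpaths
#   False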
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
    `**tkwargs`:
        All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is passed as the first argument
        into the wrapped function. If no 'prefix' argument is provided, it
        will be constructed using module and function names ('.' replaced
        with '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly
      connected an empty list is returned), or None when no trace to the
      target can be found, or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
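# Example (illustrative): only intermediate nodes are reported:
#   >>> edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
#   >>> get_trace(edges, 'a', 'd')
#   ['b', 'c']
#   >>> get_trace(edges, 'a', 'b')  # directly connected
#   []
#   >>> get_trace(edges, 'd', 'a') is None
#   True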
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
    The main purpose is to decorate functions dealing with removal of
    files/directories, which might need a few seconds to work correctly on
    Windows, since it takes its time to release files/directories.

Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
      Note that if it is exponential, duration should typically be > 1.0,
      so that the delay grows with each retry
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
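# Example (illustrative): retry a flaky removal up to 3 times, sleeping 0.1s
# between attempts ('some_path' is hypothetical):
#   @try_multiple_dec(ntrials=3, duration=0.1, exceptions=OSError)
#   def robust_unlink(path):
#       os.unlink(path)
#   robust_unlink('some_path')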
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if IOError or any other exception
    is thrown while the except statement mentions WindowsError, a NameError
    results.
    Also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    rmtree defined above does more, and ideally should not itself be decorated,
    since it has a recursive definition and does checks for open files inside,
    etc. -- that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
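# Examples (illustrative):
#   >>> slash_join('http://example.com/', '/api/')
#   'http://example.com/api/'
#   >>> slash_join(None, 'api')
#   'api'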
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
      How many bytes to read for guessing the encoding type. If
      negative, the full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
      the file; if it fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
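# Example (illustrative; use lists/tuples rather than sets, whose ordering is
# not deterministic):
#   >>> SequenceFormatter().format("{f}", f=["a", "b", "c"])
#   'a b c'
#   >>> SequenceFormatter(separator=",").format("{f}", f=("1", "2"))
#   '1,2'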
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
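# Example (illustrative; 'demo_dir' is a hypothetical target directory):
#   create_tree('demo_dir', {
#       'file1.txt': 'content',
#       'subdir': {'file2.txt': 'more content'},
#   })
# would create demo_dir/file1.txt and demo_dir/subdir/file2.txt with the
# given string loads as their content.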
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where arg is not expected to have user-provided
    .format() placeholders, but 'arg' might become a part
    of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
        elif qs:
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # Note: this branch is reached only on Windows (see the early
            # return above), which in the original snippet corresponded to
            # `platform == 0`; the prior comparison of the `platform` module
            # to 0 was always False, leaving this unescaping dead
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
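# Example (illustrative; on non-Windows this is plain shlex.split()):
#   >>> split_cmdline('git annex add "file with spaces"')
#   ['git', 'annex', 'add', 'file with spaces']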
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
    However, for use in a datalad command context, we shouldn't
    assume to be able to write to tmpfile, and also shouldn't import a whole
    lot from datalad's test machinery. Finally, we want to know whether we can
    create a symlink at a specific location, not just somewhere. Therefore we
    use an arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher level code.

Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to also make files writable before removal. Usually only
directories need write permissions.
If 'auto', it would chmod files on Windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos, whether any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc., we would not have
# to do it case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if isdir(path) and not islink(path):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = '\\\\?\\' + path  # == r'\\?\' + path, which cannot be a raw literal (trailing backslash)
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters, each starting with a letter
(not a digit), so we could get rid of .tar.gz etc
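Examples
--------
A sketch of the expected behavior:
>>> file_basename('archive.tar.gz')
'archive'
>>> file_basename('archive.tar.gz', return_ext=True)
('archive', 'tar.gz')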
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
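Examples
--------
Illustrations of the wrapping behavior:
>>> ensure_list(None)
[]
>>> ensure_list('abc')
['abc']
>>> ensure_list((1, 2))
[1, 2]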
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
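Examples
--------
>>> ensure_list_from_str('')
>>> ensure_list_from_str('a,b', sep=',')
['a', 'b']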
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
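Examples
--------
>>> ensure_dict_from_str('')
>>> ensure_dict_from_str('a=1,b=2', sep=',')  # order follows input (Python 3.7+)
{'a': '1', 'b': '2'}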
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
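Examples
--------
>>> ensure_bool('yes')
True
>>> ensure_bool('off')
False
>>> ensure_bool('0')
False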
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness is checked in reverse order, so that
later entries take precedence
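Examples
--------
>>> unique([1, 2, 1, 3])
[1, 2, 3]
>>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0])
[(1, 'a'), (2, 'b')]
>>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0], reverse=True)
[(2, 'b'), (1, 'c')]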
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since it would
return False as soon as any difference is detected, thus possibly avoiding
unnecessary evaluations
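Examples
--------
>>> all_same([1, 1, 1])
True
>>> all_same([1, 2])
False
>>> all_same([])
False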
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
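Examples
--------
>>> map_items(str, {1: 2})
{'1': '2'}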
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
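Examples
--------
>>> odd, even = partition(range(4), lambda x: x % 2 == 0)
>>> list(odd), list(even)
([1, 3], [0, 2])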
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be a positive number"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
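Examples
--------
>>> gen1, gen2 = saved_generator(iter(range(3)))
>>> list(gen1)
[0, 1, 2]
>>> list(gen2)
[0, 1, 2]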
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee the absence of side-effects of
swallowing into a StringIO, which lacks a .fileno.
Mocking print() is necessary for some uses where sys.stdout was already bound
to the original sys.stdout, so mocking it later had no effect. Overriding
the print function has the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# Python 3 requires a clear choice of mode; a race condition is possible here
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell; functions such as `os.chdir` and `os.getcwd` do not use or modify
it, thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd  # must not happen, but maybe someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager, it temporarily changes the directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want norm=True by default to match the behavior of
os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns the relative path from `pwd` to `path` if one is under the other,
and the absolute path to `path` otherwise. If `pwd` is not specified - the
current directory is assumed
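Examples
--------
Illustration with POSIX paths (outputs differ on Windows):
>>> get_path_prefix('/a/b/c', pwd='/a/b')
'c'
>>> get_path_prefix('/d/e', pwd='/a/b')
'/d/e'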
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
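Examples
--------
Illustration with POSIX paths:
>>> path_startswith('/a/b', '/a')
True
>>> path_startswith('/a', '/a')
True
>>> path_startswith('/ab', '/a')
False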
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
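Examples
--------
Illustration with POSIX paths:
>>> path_is_subpath('/a/b', '/a')
True
>>> path_is_subpath('/a', '/a')
False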
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and the resultant temporary filename is yielded by this context
manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
# For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping", filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
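Examples
--------
>>> get_trace([(1, 2), (2, 3), (3, 4)], 1, 4)
[2, 3]
>>> get_trace([(1, 2)], 1, 2)
[]
>>> get_trace([(1, 2)], 2, 1)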
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
# we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory, the root
of the dataset containing its parent directory will be reported.
If none can be found, and a symlink at `path` is pointing to a
dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories,
which might need a few retries to work correctly on Windows, which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, so naming it in an except statement
there would itself raise a NameError whenever IOError or any other
exception is thrown;
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
rmtree defined above does more and ideally should not itself be decorated
since a recursive definition and does checks for open files inside etc -
might be too runtime expensive
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
# MASKED: open_r_encdetect function (lines 2136-2158)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
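Examples
--------
>>> SequenceFormatter().format('{0}', ['a', 'b'])
'a b'
>>> SequenceFormatter(separator=', ').format('{0}', ['a', 'b'])
'a, b'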
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclases.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
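Examples
--------
>>> guard_for_format('a{b}c')
'a{{b}}c'
>>> guard_for_format('a{b}c').format()
'a{b}c'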
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
# NB the original cross-platform snippet also collapsed '""' to '"'
# here for POSIX; that branch was dead code in this Windows-only
# path ('platform' names the module, so 'platform == 0' was never true)
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()


def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
    fname: str
        Name of the file to open
    readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
    return io.open(fname, encoding=denc)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
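
    A brief illustration on a made-up function:

    >>> from datalad.utils import getargspec
    >>> def f(a, b=1, *args, c=2, **kw):
    ...     pass
    >>> getargspec(f, include_kwonlyargs=True)
    ArgSpecFake(args=['a', 'b', 'c'], varargs='args', keywords='kw', defaults=(1, 2))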
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
        New home path, in the "schema" native to the OS
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
    """Provide a shortened repr of the value, for use in logging/messages"""
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
                       if '.git' not in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
    if not islink(path) and isdir(path):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
            # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
            # (a raw string literal cannot end in a backslash, hence the strip() dance)
            path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each a few characters long and starting with a
    letter, not a digit, so we could get rid of .tar.gz etc
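
    For example:

    >>> from datalad.utils import file_basename
    >>> file_basename('archive.tar.gz')
    'archive'
    >>> file_basename('archive.tar.gz', return_ext=True)
    ('archive', 'tar.gz')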
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
        To overcome the absence of os.lutime.
        Works only on Linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
            # trust no one - adjust also the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
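
    Examples
    --------
    >>> from datalad.utils import ensure_list_from_str
    >>> ensure_list_from_str('a,b', sep=',')
    ['a', 'b']
    >>> ensure_list_from_str('')  # empty input gives None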
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
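
    Examples
    --------
    >>> from datalad.utils import ensure_dict_from_str
    >>> ensure_dict_from_str('a=1,b=2', sep=',')
    {'a': '1', 'b': '2'}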
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
    confidence: float, optional
        A value between 0 and 1; if the guessed encoding comes with a
        confidence lower than specified, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
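
    For example:

    >>> from datalad.utils import ensure_bool
    >>> ensure_bool('on'), ensure_bool('0'), ensure_bool('no')
    (True, False, False)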
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
    reverse: bool, optional
        If True, uniqueness is checked in the reverse order, so that the
        later entries take precedence
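
    Examples
    --------
    >>> from datalad.utils import unique
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0])
    [(1, 2)]
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0], reverse=True)
    [(1, 3)]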
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
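
    For example:

    >>> from datalad.utils import all_same
    >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
    (True, False, False)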
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
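
    For example:

    >>> from datalad.utils import map_items
    >>> map_items(str, {1: 2})
    {'1': '2'}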
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
        A function that will be mapped over each element in `items`. The
        elements will be partitioned based on whether the return value is
        false or true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
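
    Examples
    --------
    >>> from datalad.utils import partition
    >>> false_items, true_items = partition(range(4), lambda x: x % 2 == 0)
    >>> list(false_items), list(true_items)
    ([1, 3], [0, 2])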
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
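
    For example:

    >>> from datalad.utils import saved_generator
    >>> gen, replay = saved_generator(iter([1, 2]))
    >>> list(gen), list(replay)
    ([1, 2], [1, 2])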
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to guarantee the absence of side-effects
    of swallowing into a StringIO, which lacks .fileno.

    print mocking is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, thus mocking it later had no effect.
    Overriding the print function had the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
    # date/time prefix etc. IMHO it should preserve formatting in case file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
    Joins the list of arguments into an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
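
    For example:

    >>> from datalad.utils import updated
    >>> d = {'a': 1}
    >>> updated(d, {'b': 2})
    {'a': 1, 'b': 2}
    >>> d  # the input is left unmodified
    {'a': 1}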
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and such functions as `os.chdir` and `os.getcwd` in no way use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want to norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under topdir, and the
    absolute path to topdir otherwise. If `pwd` is not specified - the
    current directory is assumed
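
    For illustration (POSIX paths, hence skipped as a doctest elsewhere):

    >>> from datalad.utils import get_path_prefix
    >>> get_path_prefix('/a/b/c', '/a/b')  # doctest: +SKIP
    'c'
    >>> get_path_prefix('/a/b', '/d/e')  # doctest: +SKIP
    '/a/b'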
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
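
    Examples
    --------
    Illustrated with POSIX paths (hence skipped as a doctest elsewhere):

    >>> from datalad.utils import path_is_subpath
    >>> path_is_subpath('/a/b', '/a')  # doctest: +SKIP
    True
    >>> path_is_subpath('/a/b', '/a/b')  # doctest: +SKIP
    False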
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
    `**tkwargs`:
        All other arguments are passed into the call to
        tempfile.mk{,d}temp(). If no 'prefix' argument is provided, it will
        be constructed using module and function names ('.' replaced with
        '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must exist
    and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
        Returns a list with the trace to the target (the start and the target
        themselves are not included in the trace, hence if start and end are
        directly connected an empty list is returned), or None when no trace
        to the target can be found, or when start and end are identical.
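
    Examples
    --------
    >>> from datalad.utils import get_trace
    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2), (2, 3)], 1, 2)
    []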
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can be
    found, and a symlink at `path` is pointing to a dataset, `path` itself
    will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
    ntrials: int, optional
      Number of trials to attempt before giving up.
    duration: float, optional
      Seconds to sleep before retrying.
    increment_type: {None, 'exponential'}
      Note that if it is 'exponential', duration should typically be > 1.0,
      so that the delay grows when raised to a higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
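# A minimal usage sketch for try_multiple_dec (hypothetical function):
#   @try_multiple_dec(ntrials=3, duration=0.5, exceptions=OSError)
#   def remove_lock(path):
#       os.unlink(path)
# remove_lock() would then be attempted up to 3 times upon OSError,
# sleeping 0.5 sec between the attempts before finally re-raising.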
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
rmtree defined above does more and ideally should not itself be decorated
since a recursive definition and does checks for open files inside etc -
might be too runtime expensive
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
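# For illustration (hypothetical values):
#   slash_join('http://example.com/', '/posts')  # -> 'http://example.com/posts'
#   slash_join(None, 'posts')                    # -> 'posts'
#   slash_join('base', None)                     # -> 'base'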
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
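# A minimal usage sketch (assuming a hypothetical 'table.tsv' whose first row
# is a header, e.g. "name<TAB>size"):
#   for record in read_csv_lines('table.tsv'):
#       print(record['name'], record['size'])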
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
    Parameters
    ----------
    modpath: str
      Path of the .py file to import the module from
    pkg: module, optional
      If provided, and modpath is under pkg.__path__, relative import will be
      used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
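# A minimal usage sketch (hypothetical path and content):
#   create_tree('/tmp/demo', {
#       'file1.txt': 'some content',
#       'subdir': {
#           'file2.txt': 'more content',
#       },
#   })
# would create /tmp/demo/file1.txt and /tmp/demo/subdir/file2.txt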
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    This is a simplified variant supporting only the "customary" symbols
    (K, M, G, ...); see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where `arg` is not expected to contain user-provided
    .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # we are in the Windows-only branch here (see the early return
            # above), which corresponds to `platform == 0` in the original
            # snippet; the previous guard `if platform == 0:` compared the
            # `platform` module to the integer 0 and could never be True
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
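# For illustration (on non-Windows platforms this is just shlex.split):
#   split_cmdline('git commit -m "my message"')
#   # -> ['git', 'commit', '-m', 'my message']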
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
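# A minimal illustration (hypothetical function):
#   def f(a, b=1, *args, c=2, **kw):
#       pass
#   getargspec(f, include_kwonlyargs=True)
#   # -> ArgSpecFake(args=['a', 'b', 'c'], varargs='args',
#   #                keywords='kw', defaults=(1, 2))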
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
        elif value_repr.startswith('<') and value_repr.endswith('>') \
                and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
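# A minimal illustration (hypothetical class):
#   @auto_repr
#   class Foo(object):
#       def __init__(self):
#           self.a = 1
#           self.b = 'x'
#   repr(Foo())  # -> "Foo(a=1, b='x')"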
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
                       if '.git' not in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
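            # (a raw string literal cannot end with a backslash, hence the
            # .strip() dance on the next line to produce the \\?\ prefix)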
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
        # just remove the symlink or file
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
      path : psutil.Process which keeps it open
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each up to 4 characters long and starting with
    a letter (not a digit), so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
            # trust no one - adjust also that of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
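# For illustration:
#   ensure_list('abc')    # -> ['abc']: strings are not iterated over
#   ensure_list((1, 2))   # -> [1, 2]: other iterables are converted
#   ensure_list(None)     # -> []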
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
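# For illustration:
#   ensure_dict_from_str('a=1\nb=2')  # -> {'a': '1', 'b': '2'}
#   ensure_dict_from_str('')          # -> None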
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
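# For illustration:
#   ensure_bool('yes')  # -> True
#   ensure_bool('off')  # -> False
#   ensure_bool('2')    # -> True: digit strings go through int()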
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in the reverse order, so that among
      duplicates the last occurrence determines which element is kept
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
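# For illustration:
#   unique([1, 2, 1, 3])                       # -> [1, 2, 3]
#   unique([1, 2, 1, 3], reverse=True)         # -> [2, 1, 3]
#   unique(['ab', 'cb'], key=lambda s: s[-1])  # -> ['ab']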
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
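# For illustration:
#   falses, trues = partition([0, 1, 2, 3], predicate=lambda x: x % 2)
#   list(falses)  # -> [0, 2]
#   list(trues)   # -> [1, 3]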
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
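# A minimal usage sketch (hypothetical decorator):
#   @optional_args
#   def verbose(f, prefix=''):
#       @wraps(f)
#       def _wrapped(*args, **kwargs):
#           print(prefix, f.__name__)
#           return f(*args, **kwargs)
#       return _wrapped
# which then supports both decoration styles:
#   @verbose                 # bare, without arguments
#   def f1(): pass
#   @verbose(prefix='>> ')   # and parameterized with keyword arguments
#   def f2(): pass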
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files (instead of a StringIO, which lacks
    .fileno) to guarantee the absence of side-effects of swallowing.
    Mocking print() is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, thus mocking it later had no effect.
    Overriding the print function had the desired effect.
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
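# A minimal usage sketch:
#   with swallow_outputs() as cmo:
#       print('some output')
#       assert 'some output' in cmo.out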
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
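# A minimal usage sketch of swallow_logs (message text is illustrative):
#
#     with swallow_logs(new_level=logging.DEBUG, name='datalad') as cml:
#         lgr.debug("some debug message")
#         cml.assert_logged("some debug message", level='DEBUG')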
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
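# A minimal usage sketch (logger name and the noisy call are illustrative):
#
#     with disable_logger(logging.getLogger('datalad.gitrepo')):
#         do_something_noisy()  # records to that logger's handlers are rejected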
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
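# A minimal usage sketch (typically invoked once at CLI startup):
#
#     setup_exceptionhook()              # post-mortem pdb on crash, if interactive
#     setup_exceptionhook(ipython=True)  # IPython's ultratb; requires IPython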
def ensure_dir(*args):
"""Make sure directory exists.
    Joins the list of arguments to an os-specific path to the desired
    directory and creates it if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
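    Examples
    --------
    >>> updated({'a': 1}, {'b': 2})
    {'a': 1, 'b': 2}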
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
    # repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
                # - it seems to be "illegal" to have single-letter directories
                #   under the / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
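            # substring match, to catch both "No such file" and "no such file"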
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
            # must not happen, unless someone evil purged PWD from environ
            return cwd
else:
        raise RuntimeError(
            "Must not have gotten here. "
            "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
        )
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
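# A minimal usage sketch of chpwd (the path is illustrative):
#
#     with chpwd('subdir', mkdir=True):
#         ...  # getpwd() and $PWD now point at subdir
#     # the previous working directory is restored on exit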
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the path relative to `pwd` whenever one of the two paths is
    contained within the other; otherwise returns the absolute `path`. If
    `pwd` is not specified, the current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
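    Examples
    --------
    >>> path_is_subpath(op.join('pkg', 'mod'), 'pkg')
    True
    >>> path_is_subpath('pkg', 'pkg')
    False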
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
      All other arguments are passed into the call to tempfile.mk{,d}temp(),
      and the resultant temporary filename is yielded by this context
      manager. If no 'prefix' argument is provided, it will be
      constructed using module and function names ('.' replaced with
      '_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
        # Assume that all others are POSIX compliant, so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
    primarily to be used for generation of log file names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
        Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
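    Examples
    --------
    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2)], 1, 2)
    []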
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the
    root dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
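# A minimal usage sketch of try_multiple_dec (function name is hypothetical):
#
#     @try_multiple_dec(ntrials=3, duration=0.5, exceptions=OSError)
#     def remove_lock(path):
#         os.unlink(path)
#
# each OSError triggers a 0.5s sleep and a retry, for up to 3 attempts total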
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not known on Linux; if an except clause mentions
    WindowsError there and IOError or any other exception is thrown,
    a NameError results instead.
    Also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    rmtree defined above does more and ideally should not itself be decorated,
    since it has a recursive definition and performs checks for open files
    inside, etc. -- decorating it might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
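    Examples
    --------
    >>> slash_join('http://example.com/', '/dataset')
    'http://example.com/dataset'
    >>> slash_join(None, 'dataset')
    'dataset'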
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file was opened as text with UTF-8 encoding (see `kw` above):
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # ensure each cell is unicode (a no-op for str), cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
    """Helper to import a list of modules without failing if N/A
    Parameters
    ----------
    modnames: list of str
      List of module names to import
    pkg: str
      Package under which to import
    msg: str, optional
      Message template for .format() to log at DEBUG level if import fails.
      Keys {module} and {package} will be provided and ': {exception}' appended
    log: callable, optional
      Logger call to use for logging messages
    """
    from importlib import import_module
    _globals = globals()
    mods_loaded = []
    if pkg and pkg not in sys.modules:
        # with python 3.5.1 (ok with 3.5.5) somehow kept running into
        # Failed to import dlsub1: Parent module 'dltestm1' not loaded
        # while running the test. Preloading pkg resolved the issue
        import_module(pkg)
    for modname in modnames:
        try:
            _globals[modname] = mod = import_module(
                '.{}'.format(modname),
                pkg)
            mods_loaded.append(mod)
        except Exception as exc:
            from datalad.support.exceptions import CapturedException
            ce = CapturedException(exc)
            log((msg + ': {exception}').format(
                module=modname, package=pkg, exception=ce.message))
    return mods_loaded
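# A minimal usage sketch (package and module names are hypothetical):
#
#     loaded = import_modules(['node_a', 'node_b'], pkg='somepkg.plugins')
#     # modules which failed to import are only logged (at DEBUG level) and skipped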
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
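    Examples
    --------
    >>> SequenceFormatter(separator=", ").format("{0}", [1, 2, 3])
    '1, 2, 3'
    >>> SequenceFormatter().format("{0}", "not-a-list")
    'not-a-list'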
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows one to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
    if load is a tuple itself -- that would create either a subtree, or an
    archive with that content placed into the tree (an archive whenever the
    name ends with .tar.gz, .tar, or .zip)
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where arg is not expected to contain user-provided
    .format() placeholders, but 'arg' might become a part
    of a composite passed to .format(), e.g. via 'Run'
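    Examples
    --------
    >>> guard_for_format('id-{x}')
    'id-{{x}}'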
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
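    Examples
    --------
    >>> split_cmdline('datalad save -m "a message"')
    ['datalad', 'save', '-m', 'a message']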
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
        elif qs:
            # quoted string -- unescape embedded quotes and backslashes
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
    However, for use in a datalad command context, we shouldn't
    assume to be able to write to tmpfile and also should not import a whole
    lot from datalad's test machinery. Finally, we want to know whether we can
    create a symlink at a specific location, not just somewhere. Therefore use
    an arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher-level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
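# A minimal usage sketch (probe file names are hypothetical):
#
#     base = Path('.git') / 'datalad'
#     if check_symlink_capability(base / 'link-probe', base / 'target-probe'):
#         ...  # safe to rely on symlinks at this location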
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded | 2,222 | 2,256 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
    For a str it returns the original value; for bytes it uses
    ensure_unicode; for None it returns an empty string; and for any other
    type (see `cast_types`) it applies the str constructor. If the value is
    not an instance of `cast_types`, TypeError is raised.
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
    reverse: bool, optional
        If True, uniqueness is checked in reverse order, so that the last
        occurrence of each duplicate is kept instead of the first, while the
        overall order of the sequence is preserved
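
    Examples
    --------
    >>> unique([1, 2, 1, 3])
    [1, 2, 3]
    >>> unique([(1, 2), (1, 3)], key=lambda x: x[0])
    [(1, 2)]
    >>> unique([1, 2, 1, 3], reverse=True)
    [2, 1, 3]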
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
    Identical to a check like len(set(items)) == 1, but more efficient on
    generators, since it returns False as soon as any difference is
    detected, possibly avoiding unnecessary evaluations.
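
    Examples
    --------
    >>> all_same([1, 1, 1])
    True
    >>> all_same([])
    False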
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
    Initial use case: apply_recursive(url_fragment, ensure_unicode)
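
    Examples
    --------
    >>> map_items(str.upper, {'a': 'b'})
    {'A': 'B'}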
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
        A function that will be mapped over each element in `items`. The
        elements will be partitioned based on whether its return value is
        false or true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
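
    Examples
    --------
    >>> false_items, true_items = partition(range(4), lambda x: x % 2)
    >>> list(false_items), list(true_items)
    ([0, 2], [1, 3])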
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
    assert size > 0, "Size should be a positive (non-zero) integer"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
    # TODO: additional treatment for "too many arguments"? Although, as
    # https://github.com/datalad/datalad/issues/1883#issuecomment-436272758
    # shows, there seems to be no hardcoded limit on the number of arguments,
    # maybe we decide to go for something like the following to be on the
    # safe side:
    # chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
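
    Examples
    --------
    >>> gen1, gen2 = saved_generator(iter(range(3)))
    >>> list(gen1)
    [0, 1, 2]
    >>> list(gen2)
    [0, 1, 2]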
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
        t0 = time()  # set before `try` so the `finally` below can rely on it
        try:
            self = args[0]
            stack = traceback.extract_stack()
            caller = stack[-2]
            stack_sig = \
                "{relpath}:{s.name}".format(
                    s=caller, relpath=relpath(caller.filename, toppath))
            sig = (id(self), stack_sig)
            # we will count based on id(self) + wherefrom
            memo[sig][caller.lineno] += 1
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
                sys.stdout.write(sep.join(map(str, args)) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
    This function will try to use the PWD environment variable to provide a
    current working directory, possibly with some directories along the path
    being symlinks to other directories. Unfortunately, PWD is used/set only
    by the shell; functions such as `os.chdir` and `os.getcwd` neither use
    nor modify it, thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows temporarily changing the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path from `pwd` to `path`, if one is under the
    other, and the absolute path to `path` otherwise. If `pwd` is not
    specified, the current directory is assumed.
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
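
    Examples
    --------
    >>> path_is_subpath('/a/b', '/a')   # POSIX paths shown
    True
    >>> path_is_subpath('/a', '/a')
    False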
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded by this context
        manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory will be created if it doesn't exist, but `dspath` must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
        Returns a list with the trace to the target (the start and the target
        are not included in the trace, hence if start and end are directly
        connected an empty list is returned), or None when no trace to the
        target can be found, or start and end are identical.
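
    Examples
    --------
    >>> get_trace([(1, 2), (2, 3), (3, 4)], 1, 4)
    [2, 3]
    >>> get_trace([(1, 2), (2, 3)], 1, 2)
    []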
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
rmtree defined above does more and ideally should not itself be decorated
since a recursive definition and does checks for open files inside etc -
might be too runtime expensive
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
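
    Examples
    --------
    >>> slash_join('http://example.com/', '/path')
    'http://example.com/path'
    >>> slash_join(None, 'path')
    'path'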
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
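
    Examples
    --------
    >>> import os, tempfile
    >>> d = tempfile.mkdtemp()
    >>> create_tree(d, {'f.txt': 'content', 'sub': {'g.txt': 'more'}})
    >>> sorted(os.listdir(d))
    ['f.txt', 'sub']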
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # In the original snippet `platform` was a 0/1 flag (0 == POSIX)
            # and `""` was collapsed to `"` only for POSIX. Here this code
            # path runs on Windows only (see above), and comparing the
            # `platform` *module* to 0 was always False -- dead code dropped.
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
def lmtime(filepath, mtime):
    """Set mtime for files, while not de-referencing symlinks.

    To overcome absence of os.lutime

    Works only on linux and OSX ATM
    """
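    # NOTE: the body below is a minimal sketch (the original implementation
    # was not preserved in this copy). It assumes availability of the system
    # 'touch' utility with the '-h' (no-dereference) option, as found on
    # Linux and OSX.
    import subprocess
    # 'touch -t' expects a [[CC]YY]MMDDhhmm[.SS] timestamp in local time
    smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
    lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
    subprocess.run(['touch', '-h', '-t', smtime, str(filepath)], check=True)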
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
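
    Examples
    --------
    >>> def f(a, b=1, *args, **kwargs): pass
    >>> getargspec(f)
    ArgSpecFake(args=['a', 'b'], varargs='args', keywords='kwargs', defaults=(1,))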
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
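    """Return a (possibly) shortened repr of `value`

    Reprs longer than `l` characters are abbreviated, and objects with
    default/unhelpful reprs are rendered as just their class name.
    """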
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
        # TODO: should we add this feature to minimize some talkative reprs
        # such as of URL?
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
                       if '.git' not in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
            path = '\\\\?\\' + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strip up to 2 extensions, each being a dot followed by 2-5 characters
    starting with a letter or underscore (not a digit), so we can get rid
    of .tar.gz etc
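    Examples
    --------
    Illustrative cases of compound-extension stripping:
    >>> file_basename('archive.tar.gz')
    'archive'
    >>> file_basename('archive.tar.gz', return_ext=True)
    ('archive', 'tar.gz')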
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
    def lmtime(filepath, mtime):
        """Set mtime for files while not de-referencing symlinks.
        A minimal sketch to overcome the absence of os.lutime: delegate to
        ``touch -h``, which adjusts the symlink itself rather than its target.
        """
        import subprocess
        # convert mtime to the format touch understands: [[CC]YY]MMDDhhmm[.SS]
        smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
        lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
        # sketch: use `touch -h -t` so symlinks are not dereferenced
        subprocess.run(['touch', '-h', '-t', smtime, filepath], check=True)
        rfilepath = op.realpath(filepath)
        if islink(filepath) and exists(rfilepath):
            # trust no one -- adjust the mtime of the symlink target as well
            lgr.log(3, "File is a symlink to %s. Setting mtime for it to %s",
                    rfilepath, mtime)
            os.utime(rfilepath, (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
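    Examples
    --------
    A couple of illustrative conversions:
    >>> ensure_list_from_str('a b c', sep=' ')
    ['a', 'b', 'c']
    >>> ensure_list_from_str('') is None
    True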
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
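    Examples
    --------
    Values are kept as strings:
    >>> ensure_dict_from_str('a=1 b=2', sep=' ')
    {'a': '1', 'b': '2'}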
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
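    Examples
    --------
    A few illustrative conversions:
    >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
    (True, False, True)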
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
    For unicode it would return the original value; for bytes it would use
    ensure_unicode; for None - an empty (unicode) string; and for any other
    type (see `cast_types`) - it would apply the str constructor. If value
    is not an instance of `cast_types`, TypeError is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
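    Examples
    --------
    Illustrative conversions:
    >>> as_unicode(None), as_unicode(1), as_unicode(b'abc')
    ('', '1', 'abc')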
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
        If True, uniqueness is checked in reverse order, so that later
        entries take precedence
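    Examples
    --------
    Order of first (or last, with reverse=True) occurrence is maintained:
    >>> unique([1, 3, 1, 2])
    [1, 3, 2]
    >>> unique([1, 3, 1, 2], reverse=True)
    [3, 1, 2]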
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
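    Examples
    --------
    Note that an empty sequence yields False:
    >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
    (True, False, False)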
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
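    Examples
    --------
    Both keys and values are mapped:
    >>> map_items(str, {1: 2})
    {'1': '2'}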
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
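    Examples
    --------
    'False' items come first, 'true' ones second:
    >>> falses, trues = partition([0, 1, 2, 3])
    >>> list(falses), list(trues)
    ([0], [1, 2, 3])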
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
    assert size > 0, "Size should be a positive number"
while container:
yield container[:size]
container = container[size:]
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
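    Examples
    --------
    The second generator replays what the first one has produced:
    >>> gen1, gen2 = saved_generator(iter(range(3)))
    >>> list(gen1), list(gen2)
    ([0, 1, 2], [0, 1, 2])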
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
    Internally uses temporary files to avoid the side-effects of swallowing
    into a StringIO, which lacks .fileno.
    Mocking print() is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, so mocking it later had no effect;
    overriding the print function has the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: maybe merge with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
    for h in logger.handlers:
        h.addFilter(filter_)
try:
yield logger
finally:
        for h in logger.handlers:
            h.removeFilter(filter_)
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
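    Examples
    --------
    The input dict is left unmodified:
    >>> d = {'a': 1}
    >>> updated(d, {'b': 2})
    {'a': 1, 'b': 2}
    >>> d
    {'a': 1}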
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and functions such as `os.chdir` and `os.getcwd` neither use nor
    modify it; thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
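            # substring match catches both "No such file" and "no such file"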
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows temporarily changing the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
    TODO: we might want norm=True by default to match the behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under the topdir,
    and the absolute path to the topdir otherwise. If `pwd` is not
    specified, the current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
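# For example (illustrative, POSIX-style paths): path_is_subpath('/a/b', '/a')
# is True, while path_is_subpath('/a', '/a') is False (equality is not subpath).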
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded by this context
        manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
        # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
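# For example (illustrative): _path_('a/b', 'c') gives 'a/b/c' on POSIX and
# r'a\b\c' on Windows.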
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory will be created if it doesn't exist, but dspath must
    exist and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
        Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
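    Examples
    --------
    A small tree given as parent-child edges:
    >>> get_trace([(1, 2), (2, 3)], 1, 3)
    [2]
    >>> get_trace([(1, 2), (2, 3)], 1, 2)
    []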
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
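# For example (illustrative; `fetch` and `url` are hypothetical stand-ins):
#   try_multiple(3, ConnectionError, 2, fetch, url)
# would retry `fetch(url)` up to 3 times, sleeping 2 and then 4 seconds
# between attempts.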
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not defined on Linux, so if an IOError or any other
    exception is thrown there while an except statement mentions
    WindowsError, the result is a NameError;
    also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more (e.g. recursing and checking for
    open files) and ideally should not itself be decorated, since that
    might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
    If either string is None, the other is returned as is.
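    Examples
    --------
    Duplicate slashes at the junction are collapsed:
    >>> slash_join('http://example.com/', '/about')
    'http://example.com/about'
    >>> slash_join(None, 'about')
    'about'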
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # read as UTF-8; cells are passed through ensure_unicode below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # normalize cells to unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
        ('locale.preferred', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
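    Examples
    --------
    Sequence elements are formatted individually and joined (illustrative):
    >>> SequenceFormatter().format("{args}", args=["-r", "-f"])
    '-r -f'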
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
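    A minimal illustrative load (hypothetical paths)::
        create_tree('/tmp/demo', {'file1.txt': 'content',
                                  'subdir': {'file2.txt': 'more content'}})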
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
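# An illustrative sketch (hypothetical paths and content) of the
# specification create_tree consumes: plain files, a nested subtree, and an
# executable entry via the File helper:
#
#   create_tree('/tmp/demo', {
#       'README': 'hello',
#       'sub': {'data.txt': 'content'},
#       File('run.sh', executable=True): '#!/bin/sh\necho ok\n',
#   })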
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
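# Doctest-style sketch (hypothetical values); with a separator that has no
# newline, the suggestions follow the message on the same line:
#
#   >>> get_suggestions_msg('instal', ['install', 'uninstall'], sep=', ')
#   'Did you mean any of these? install, uninstall\n'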
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
    Only the "customary" symbol set (K, M, G, ...) is supported in this
    simplified version; see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where `arg` is not expected to contain user-provided
    .format() placeholders, but might become a part of a composite string
    passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # NB: this branch runs only on Windows ("platform 0" in the
            # original snippet, where `platform` was a parameter; here the
            # name would refer to the module, making the old check always
            # false), so always collapse doubled quotes
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
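# Round-trip sketch on a POSIX system, where these helpers delegate to shlex:
#
#   >>> join_cmdline(['git', 'commit', '-m', 'two words'])
#   "git commit -m 'two words'"
#   >>> split_cmdline("git commit -m 'two words'")
#   ['git', 'commit', '-m', 'two words']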
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
    However, for use in a datalad command context, we shouldn't assume being
    able to write to a tmpfile, and should not import a whole lot from
    datalad's test machinery. Finally, we want to know whether we can create a
    symlink at a specific location, not just somewhere. Therefore use the
    given arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can thus be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
            target.unlink()
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
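# Doctest-style sketch of the compat shim, including keyword-only arguments:
#
#   >>> def f(a, b=1, *args, c=2, **kwargs):
#   ...     pass
#   >>> getargspec(f, include_kwonlyargs=True)
#   ArgSpecFake(args=['a', 'b', 'c'], varargs='args', keywords='kwargs', defaults=(1, 2))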
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
      New home path, in the OS-native form
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
        # TODO: should we add this feature to minimize some talkative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
    regex: basestring
    topdir: basestring, optional
      Directory where to search
    exclude: basestring, optional
      Matches to exclude
    exclude_vcs:
      If True, excludes commonly known VCS subdirectories. If string, used
      as regex to exclude those files (regex: `%r`)
    exclude_datalad:
      If True, excludes files known to be datalad meta-data files (e.g. under
      .datalad/ subdirectory) (regex: `%r`)
    dirs: bool, optional
      Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
            # anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
      path : psutil.Process object which has that path open
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions, each being a dot followed by 2-5 characters
    starting with a letter or underscore (not a digit), so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
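# Doctest-style sketch:
#
#   >>> file_basename('archive.tar.gz')
#   'archive'
#   >>> file_basename('archive.tar.gz', return_ext=True)
#   ('archive', 'tar.gz')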
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
            # trust no one - adjust also the mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
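# Doctest-style sketch:
#
#   >>> ensure_dict_from_str('a=1\nb=2')
#   {'a': '1', 'b': '2'}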
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
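# Doctest-style sketch of the recognized spellings:
#
#   >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
#   (True, False, True)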
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
    reverse: bool, optional
      If True, uniqueness is checked in reverse order, so that for duplicates
      the last occurrence determines the resulting position
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
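# Doctest-style sketch; with a key, the first (or, with reverse=True, the
# last) occurrence wins:
#
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0])
#   [(1, 'a'), (2, 'b')]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0], reverse=True)
#   [(2, 'b'), (1, 'c')]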
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
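# Doctest-style sketch; note that both returned values are generators:
#
#   >>> falses, trues = partition(range(5), lambda x: x % 2)
#   >>> list(falses), list(trues)
#   ([0, 2, 4], [1, 3])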
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
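# Doctest-style sketch:
#
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]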
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
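# Doctest-style sketch; the replay generator only yields items the first
# generator has already produced:
#
#   >>> gen1, gen2 = saved_generator(iter(range(3)))
#   >>> list(gen1)
#   [0, 1, 2]
#   >>> list(gen2)
#   [0, 1, 2]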
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
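# Sketch (hypothetical decorator) of the two supported usages:
#
#   @optional_args
#   def tag(f, label="default"):
#       f.label = label
#       return f
#
#   @tag                    # bare -- decorates directly
#   def a(): pass
#
#   @tag(label="x")         # with arguments -- returns the actual decorator
#   def b(): pass
#
#   # a.label == "default"; b.label == "x"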
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
    Mocking print() is necessary for some uses where sys.stdout was already
    bound to the original sys.stdout, so mocking it later had no effect;
    overriding the print function has the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
    directory and creates it, if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and functions such as `os.chdir` and `os.getcwd` neither use nor
    modify it; thus `os.getcwd()` returns a path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
    If used as a context manager, it allows one to temporarily change the
    directory to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
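# Context-manager sketch (hypothetical path):
#
#   with chpwd('/tmp/somewhere', mkdir=True):
#       ...  # both os.getcwd() and os.environ['PWD'] point here
#   # the previous working directory is restored on exit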
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
    Returns the relative path to the topdir if we are under it, and the
    absolute path to the topdir otherwise. If `pwd` is not specified, the
    current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
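# Doctest-style sketch (POSIX paths):
#
#   >>> path_startswith('/a/b', '/a/b')
#   True
#   >>> path_is_subpath('/a/b', '/a/b')
#   False
#   >>> path_is_subpath('/a/b/c', '/a/b')
#   True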
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded by this context
        manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
        # Assume that all other platforms are POSIX compliant, so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
    primarily to be used for generating log file names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
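# Example (editor's sketch; assumes TIMESTAMP_FMT is an ISO-like format):
#   get_timestamp_suffix(0)  -> something like '-1970-01-01T00:00:00+0000'
# i.e. a string suitable to append to a log file name.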
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The log directory will be created if it doesn't exist, but `dspath`
    must already exist and be a directory.
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
      Returns a list with the trace to the target (the start and the target
      are not included in the trace, hence if start and end are directly
      connected an empty list is returned), or None when no trace to the
      target can be found, or when start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we have nothing yet, and this edge does not match the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
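# Illustration (editor's sketch, not part of the original module):
#   edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
#   get_trace(edges, 'a', 'd')  -> ['b', 'c']  (endpoints are excluded)
#   get_trace(edges, 'a', 'b')  -> []          (directly connected)
#   get_trace(edges, 'd', 'a')  -> None        (no trace exists)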
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported. If none can
    be found, and a symlink at `path` points to a dataset, `path` itself
    will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
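# Illustration (editor's sketch): given a dataset at /ds (i.e. /ds/.git
# exists), get_dataset_root('/ds/sub/dir') walks upwards and returns '/ds';
# a relative input such as 'sub/dir' (with the CWD at /ds) yields the root
# in relative form, e.g. '.'.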
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
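# Example (editor's sketch): with ntrials=3 and base=2, a failing call is
# retried after sleeping 2s and then 4s; the third failure is re-raised.
#   try_multiple(3, OSError, 2, os.rmdir, 'flaky/dir')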
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
    Its main purpose is to decorate functions dealing with removal of
    files/directories, which might need a few retries to work correctly on
    Windows, since Windows takes its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
      Note that for 'exponential', `duration` should typically be > 1.0,
      so that the delay actually grows with each trial
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    The rmtree defined above does more (e.g. it recurses and checks for
    open files) and ideally should not itself be decorated, since that
    might be too expensive at runtime.
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
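# Illustration (editor's sketch):
#   slash_join('http://example.com/', '/api')  -> 'http://example.com/api'
#   slash_join('http://example.com', None)     -> 'http://example.com'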
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
      negative, the full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
      Dialect to specify to csv.reader. If not specified, it is guessed
      from the file; if guessing fails, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # read the file as UTF-8; cells are normalized via ensure_unicode below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
            # normalize each cell to Unicode:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
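# Usage sketch (editor's illustration): for a TSV file whose header line is
# "name<TAB>age", each yielded record is a dict keyed by the header, e.g.:
#   for row in read_csv_lines('people.tsv'):
#       print(row['name'], row['age'])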
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
    if pkg and pkg not in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
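# Illustration (editor's sketch):
#   SequenceFormatter().format("ls {0}", ["a", "b"])       -> 'ls a b'
#   SequenceFormatter(separator=",").format("{0}", [1, 2]) -> '1,2'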
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
    It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
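# Usage sketch (editor's illustration):
#   create_tree('/tmp/demo', {'a.txt': 'content',
#                             'sub': {'b.txt': 'more'},
#                             'arch.tar.gz': {'inside.txt': 'packed'}})
# creates a file, a subdirectory with a file, and a tarball built from the
# given load.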
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
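# Illustration (editor's sketch):
#   get_suggestions_msg('instal', ['install', 'uninstall'])
# returns a message along the lines of
#   'Did you mean any of these?\n        install\n        uninstall\n'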
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
    To be used in cases where `arg` is not expected to contain
    user-provided .format() placeholders, but might become part
    of a composite string passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
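# Illustration (editor's sketch):
#   quote_cmdlinearg("a b")      -> "'a b'" on POSIX, '"a b"' on Windows
#   join_cmdline(['ls', 'a b'])  -> "ls 'a b'" on POSIX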
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # collapse doubled quotes (Windows quoting rules apply; this path
            # is only reached on Windows). Upstream's `platform == 0` check
            # compared the platform *module* to 0 and thus never fired.
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
|
_match_datetime_pattern | Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500 | #!/bin/python
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from six.moves import range
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
"""Custom datetime encoder for json output."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
"""
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
    version 2.4+, it also contains microseconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
    For performance reasons, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
# create from string, remove line breaks at end of _line_str
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
# docs don't need to be parsed lazily, they are fast
self._parse_document()
def _reset(self):
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
def set_line_str(self, line_str):
"""
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
        not from a system.profile document.
"""
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
"""Return line_str depending on source, logfile or system.profile."""
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
"""Calculate duration if available (lazy)."""
if not self._duration_calculated:
self._duration_calculated = True
# split_tokens = self.split_tokens
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
# find duration from end
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration
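    # Illustration (editor's sketch): for a log line ending in e.g.
    # "... nreturned:1 reslen:36 102ms", the `duration` property yields 102;
    # lines containing "Scheduled new oplog query" are ignored even if they
    # end in "ms".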
@property
def datetime(self):
"""Extract datetime if available (lazy)."""
if not self._datetime_calculated:
self._datetime_calculated = True
# if no datetime after 10 tokens, break to avoid parsing
# very long lines
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
# separate datetime str and linestr
self._line_str = (' '.join(self.split_tokens
[self._datetime_nextpos:]))
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
def set_datetime_hint(self, format, nextpos, rollover):
self._datetime_format = format
self._datetime_nextpos = nextpos
self._year_rollover = rollover
# Fast check if timestamp format changed.
# If it has, trigger datetime evaluation.
if format.startswith('ctime'):
if (len(self.split_tokens) < 4 or
self.split_tokens[self._datetime_nextpos - 4] not in
self.weekdays):
_ = self.datetime
return False
return True
else:
if len(self.split_tokens) == 0:
# empty line, no need to parse datetime
self._datetime_calculated = True
return False
try:
if not (self.split_tokens[self._datetime_nextpos - 1][0]
.isdigit()):
# not the timestamp format that was hinted
_ = self.datetime
return False
except Exception:
pass
return True
# MASKED: _match_datetime_pattern function (lines 279-330)
@property
def thread(self):
"""Extract thread name if available (lazy)."""
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos:
return None
if len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
if self._thread is not None:
if self._thread in ['initandlisten', 'mongosMain']:
if len(split_tokens) >= 5 and split_tokens[-5][0] == '#':
self._conn = 'conn' + split_tokens[-5][1:]
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread
@property
def conn(self):
r"""
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
"""
self.thread
return self._conn
@property
def operation(self):
"""
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
"""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
"""Extract namespace if available (lazy)."""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
def _extract_operation_and_namespace(self):
"""
Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list.
"""
split_tokens = self.split_tokens
if not self._datetime_nextpos:
# force evaluation of thread to get access to datetime_offset and
# to protect from changes due to line truncation.
_ = self.thread
if not self._datetime_nextpos or (len(split_tokens) <=
self._datetime_nextpos + 2):
return
op = split_tokens[self._datetime_nextpos + 1].lower()
if op == 'warning:':
# check if this log line got truncated
if ("warning: log line attempted" in self._line_str and
"over max size" in self._line_str):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[self._datetime_nextpos + 1]
else:
# unknown warning, bail out
return
if op in self.log_operations:
self._operation = op
self._namespace = split_tokens[self._datetime_nextpos + 2]
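    # Illustration (editor's sketch): for a (post-datetime) line such as
    #   "[conn42] query test.coll query: { a: 1 } ..."
    # `operation` becomes 'query' and `namespace` becomes 'test.coll'.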
@property
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
"""Extract query pattern from operations."""
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
@property
def command(self):
"""Extract query pattern from operations."""
if not self._command_calculated:
self._command_calculated = True
if self.operation == 'command':
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[command_idx + 1]
if command == '{':
# workaround for <= 2.2 log files,
# where command was not listed separately
command = self.split_tokens[command_idx + 2][:-1]
self._command = command.lower()
except ValueError:
pass
return self._command
@property
def nscanned(self):
"""Extract nscanned or keysExamined counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def nscannedObjects(self):
"""
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
"""Extract ninserted or nInserted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
"""Extract ndeleted or nDeleted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
"""Extract nupdated or nModified counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def planSummary(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def r(self):
"""Extract read lock (r) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
"""Extract write lock (w) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
def _extract_counters(self):
"""Extract counters like nscanned and nreturned from the logevent."""
# extract counters (if present)
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
'planSummary', 'writeConflicts', 'keyUpdates']
# TODO: refactor mtools to use current counter names throughout
# Transitionary hack: mapping of current names into prior equivalents
counter_equiv = {
'docsExamined': 'nscannedObjects',
'keysExamined': 'nscanned',
'nDeleted': 'ndeleted',
'nInserted': 'ninserted',
'nMatched': 'nreturned',
'nModified': 'nupdated'
}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
# trigger operation evaluation to get access to offset
if self.operation:
for t, token in enumerate(split_tokens[self.datetime_nextpos +
2:]):
for counter in counters:
if token.startswith('%s:' % counter):
try:
# Remap counter to standard name, if applicable
counter = counter_equiv.get(counter, counter)
vars(self)['_' + counter] = int((token.split(':')
[-1]).replace(',',
''))
except ValueError:
# see if this is a pre-2.5.2 numYields with space
# in between (e.g. "numYields: 2")
# https://jira.mongodb.org/browse/SERVER-10101
if (counter == 'numYields' and
token.startswith('numYields')):
try:
self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
                                except (ValueError, IndexError):
pass
if (counter == 'planSummary' and
token.startswith('planSummary')):
try:
self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
                                except IndexError:
pass
# token not parsable, skip
break
@property
def level(self):
"""Extract log level if available (lazy)."""
if not self._level_calculated:
self._level_calculated = True
self._extract_level()
return self._level
@property
def component(self):
"""Extract log component if available (lazy)."""
self.level
return self._component
def _extract_level(self):
"""Extract level and component if available (lazy)."""
if self._level is None:
split_tokens = self.split_tokens
            # need at least three tokens (timestamp, level, component)
            if len(split_tokens) < 3:
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1])
if split_tokens[1] in self.log_levels else None)
if x is not None:
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False
def parse_all(self):
"""
Trigger extraction of all information.
These values are usually evaluated lazily.
"""
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r
def _find_pattern(self, trigger):
# get start of json query pattern
start_idx = self.line_str.rfind(trigger)
if start_idx == -1:
# no query pattern found
return None
stop_idx = 0
brace_counter = 0
search_str = self.line_str[start_idx + len(trigger):]
for match in re.finditer(r'{|}', search_str):
stop_idx = match.start()
if search_str[stop_idx] == '{':
brace_counter += 1
else:
brace_counter -= 1
if brace_counter == 0:
break
search_str = search_str[:stop_idx + 1].strip()
if search_str:
return json2pattern(search_str)
else:
return None
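    # Illustration (editor's sketch): with trigger 'query: ' and a line
    # containing "query: { a: 1, b: { $gt: 5 } }", the balanced-brace scan
    # extracts "{ a: 1, b: { $gt: 5 } }" and hands it to json2pattern().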
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
            raise ValueError('invalid datetime format %s, choose from ctime, '
                             'ctime-pre2.4, iso8601-utc, iso8601-local.'
                             % format)
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
"""Default string conversion for LogEvent object is its line_str."""
return str(self.line_str)
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output
def to_json(self, labels=None):
"""Convert LogEvent object to valid JSON."""
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
            self._command = list(doc[u'command'].keys())[0]
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = ("[{thread}] {operation} {namespace} {payload} "
"{scanned} {yields} locks(micros) {locks} "
"{duration}".format(datetime=self.datetime,
thread=self.thread,
operation=self.operation,
namespace=self.namespace,
payload=payload, scanned=scanned,
yields=yields, locks=locks,
duration=duration)) | def _match_datetime_pattern(self, tokens):
"""
Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500
"""
# first check: less than 4 tokens can't be ctime
assume_iso8601_format = len(tokens) < 4
# check for ctime-pre-2.4 or ctime format
if not assume_iso8601_format:
weekday, month, day, time = tokens[:4]
            # len(tokens) >= 4 is guaranteed in this branch
            if ((weekday not in self.weekdays) or
                    (month not in self.months) or not day.isdigit()):
assume_iso8601_format = True
if assume_iso8601_format:
# sanity check, because the dateutil parser could interpret
# any numbers as a valid date
            if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}',
tokens[0]):
return None
            # convinced that this is an ISO-8601 format, the dateutil parser
# will do the rest
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = "iso8601-utc" \
if tokens[0].endswith('Z') else "iso8601-local"
else:
# assume current year unless self.year_rollover
# is set (from LogFile)
year = datetime.now().year
            dt = dateutil.parser.parse(' '.join(tokens[:4]),
default=datetime(year, 1, 1))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzutc())
if self._year_rollover and dt > self._year_rollover:
dt = dt.replace(year=year - 1)
self._datetime_format = "ctime" \
if '.' in tokens[3] else "ctime-pre2.4"
return dt | 279 | 330 | #!/bin/python
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from six.moves import range
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
"""Custom datetime encoder for json output."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
"""
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
    version 2.4+, it also contains microseconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
    For performance reasons, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
# create from string, remove line breaks at end of _line_str
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
# docs don't need to be parsed lazily, they are fast
self._parse_document()
def _reset(self):
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
def set_line_str(self, line_str):
"""
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
        not from a system.profile document.
"""
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
"""Return line_str depending on source, logfile or system.profile."""
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
"""Calculate duration if available (lazy)."""
if not self._duration_calculated:
self._duration_calculated = True
# split_tokens = self.split_tokens
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
# find duration from end
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration
@property
def datetime(self):
"""Extract datetime if available (lazy)."""
if not self._datetime_calculated:
self._datetime_calculated = True
# if no datetime after 10 tokens, break to avoid parsing
# very long lines
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
# separate datetime str and linestr
self._line_str = (' '.join(self.split_tokens
[self._datetime_nextpos:]))
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
def set_datetime_hint(self, format, nextpos, rollover):
self._datetime_format = format
self._datetime_nextpos = nextpos
self._year_rollover = rollover
# Fast check if timestamp format changed.
# If it has, trigger datetime evaluation.
if format.startswith('ctime'):
if (len(self.split_tokens) < 4 or
self.split_tokens[self._datetime_nextpos - 4] not in
self.weekdays):
_ = self.datetime
return False
return True
else:
if len(self.split_tokens) == 0:
# empty line, no need to parse datetime
self._datetime_calculated = True
return False
try:
if not (self.split_tokens[self._datetime_nextpos - 1][0]
.isdigit()):
# not the timestamp format that was hinted
_ = self.datetime
return False
except Exception:
pass
return True
def _match_datetime_pattern(self, tokens):
"""
Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500
"""
# first check: less than 4 tokens can't be ctime
assume_iso8601_format = len(tokens) < 4
# check for ctime-pre-2.4 or ctime format
if not assume_iso8601_format:
weekday, month, day, time = tokens[:4]
            # len(tokens) >= 4 is guaranteed in this branch
            if ((weekday not in self.weekdays) or
                    (month not in self.months) or not day.isdigit()):
assume_iso8601_format = True
if assume_iso8601_format:
# sanity check, because the dateutil parser could interpret
# any numbers as a valid date
            if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}',
tokens[0]):
return None
            # convinced that this is an ISO-8601 format, the dateutil parser
# will do the rest
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = "iso8601-utc" \
if tokens[0].endswith('Z') else "iso8601-local"
else:
# assume current year unless self.year_rollover
# is set (from LogFile)
year = datetime.now().year
            dt = dateutil.parser.parse(' '.join(tokens[:4]),
default=datetime(year, 1, 1))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzutc())
if self._year_rollover and dt > self._year_rollover:
dt = dt.replace(year=year - 1)
self._datetime_format = "ctime" \
if '.' in tokens[3] else "ctime-pre2.4"
return dt
@property
def thread(self):
"""Extract thread name if available (lazy)."""
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos:
return None
if len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
if self._thread is not None:
if self._thread in ['initandlisten', 'mongosMain']:
if len(split_tokens) >= 5 and split_tokens[-5][0] == '#':
self._conn = 'conn' + split_tokens[-5][1:]
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread
@property
def conn(self):
r"""
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
"""
self.thread
return self._conn
@property
def operation(self):
"""
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
"""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
"""Extract namespace if available (lazy)."""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
def _extract_operation_and_namespace(self):
"""
Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list.
"""
split_tokens = self.split_tokens
if not self._datetime_nextpos:
# force evaluation of thread to get access to datetime_offset and
# to protect from changes due to line truncation.
_ = self.thread
if not self._datetime_nextpos or (len(split_tokens) <=
self._datetime_nextpos + 2):
return
op = split_tokens[self._datetime_nextpos + 1].lower()
if op == 'warning:':
# check if this log line got truncated
if ("warning: log line attempted" in self._line_str and
"over max size" in self._line_str):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[self._datetime_nextpos + 1]
else:
# unknown warning, bail out
return
if op in self.log_operations:
self._operation = op
self._namespace = split_tokens[self._datetime_nextpos + 2]
@property
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
"""Extract query pattern from operations."""
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
@property
def command(self):
"""Extract query pattern from operations."""
if not self._command_calculated:
self._command_calculated = True
if self.operation == 'command':
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[command_idx + 1]
if command == '{':
# workaround for <= 2.2 log files,
# where command was not listed separately
command = self.split_tokens[command_idx + 2][:-1]
self._command = command.lower()
except ValueError:
pass
return self._command
@property
def nscanned(self):
"""Extract nscanned or keysExamined counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def nscannedObjects(self):
"""
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
"""Extract ninserted or nInserted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
"""Extract ndeleted or nDeleted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
"""Extract nupdated or nModified counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def planSummary(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def r(self):
"""Extract read lock (r) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
"""Extract write lock (w) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
def _extract_counters(self):
"""Extract counters like nscanned and nreturned from the logevent."""
# extract counters (if present)
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
'planSummary', 'writeConflicts', 'keyUpdates']
# TODO: refactor mtools to use current counter names throughout
        # Transitional hack: mapping of current names to their prior equivalents
counter_equiv = {
'docsExamined': 'nscannedObjects',
'keysExamined': 'nscanned',
'nDeleted': 'ndeleted',
'nInserted': 'ninserted',
'nMatched': 'nreturned',
'nModified': 'nupdated'
}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
# trigger operation evaluation to get access to offset
if self.operation:
for t, token in enumerate(split_tokens[self.datetime_nextpos +
2:]):
for counter in counters:
if token.startswith('%s:' % counter):
try:
# Remap counter to standard name, if applicable
counter = counter_equiv.get(counter, counter)
                            vars(self)['_' + counter] = int(token.split(':')[-1].replace(',', ''))
except ValueError:
# see if this is a pre-2.5.2 numYields with space
# in between (e.g. "numYields: 2")
# https://jira.mongodb.org/browse/SERVER-10101
if (counter == 'numYields' and
token.startswith('numYields')):
try:
self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
                                except (IndexError, ValueError):
pass
if (counter == 'planSummary' and
token.startswith('planSummary')):
try:
self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
                                except (IndexError, ValueError):
pass
# token not parsable, skip
break
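    # Illustrative sketch (not part of mtools) of the counter scan above:
    # "name:value" tokens become integer attributes, everything else is skipped.
    #
    #   line = "query test.users ntoreturn:0 nscanned:100 nreturned:2 142ms"
    #   counters = {}
    #   for token in line.split():
    #       name, _, value = token.partition(':')
    #       if value.replace(',', '').isdigit():
    #           counters[name] = int(value.replace(',', ''))
    #   # counters == {'ntoreturn': 0, 'nscanned': 100, 'nreturned': 2}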
@property
def level(self):
"""Extract log level if available (lazy)."""
if not self._level_calculated:
self._level_calculated = True
self._extract_level()
return self._level
@property
def component(self):
"""Extract log component if available (lazy)."""
        self.level  # evaluating level also extracts self._component
return self._component
def _extract_level(self):
"""Extract level and component if available (lazy)."""
if self._level is None:
split_tokens = self.split_tokens
if not split_tokens:
self._level = False
self._component = False
return
            if split_tokens[1] in self.log_levels:
                self._level = split_tokens[1]
                self._component = split_tokens[2]
            else:
                self._level = False
                self._component = False
def parse_all(self):
"""
Trigger extraction of all information.
These values are usually evaluated lazily.
"""
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r
def _find_pattern(self, trigger):
# get start of json query pattern
start_idx = self.line_str.rfind(trigger)
if start_idx == -1:
# no query pattern found
return None
stop_idx = 0
brace_counter = 0
search_str = self.line_str[start_idx + len(trigger):]
for match in re.finditer(r'{|}', search_str):
stop_idx = match.start()
if search_str[stop_idx] == '{':
brace_counter += 1
else:
brace_counter -= 1
if brace_counter == 0:
break
search_str = search_str[:stop_idx + 1].strip()
if search_str:
return json2pattern(search_str)
else:
return None
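    # Illustrative sketch (not part of mtools) of the brace counting used in
    # _find_pattern above: walk the braces after the trigger and stop where
    # the first '{' is balanced, so nested subdocuments stay intact.
    #
    #   search_str = '{ a: { "$gt": 1 } } ntoreturn:0'
    #   depth = 0
    #   for match in re.finditer(r'[{}]', search_str):
    #       depth += 1 if search_str[match.start()] == '{' else -1
    #       if depth == 0:
    #           break
    #   pattern = search_str[:match.start() + 1].strip()
    #   # pattern == '{ a: { "$gt": 1 } }'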
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
            raise ValueError('invalid datetime format %s, choose from ctime, '
                             'ctime-pre2.4, iso8601-utc, iso8601-local.'
                             % format)
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
"""Default string conversion for LogEvent object is its line_str."""
return str(self.line_str)
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output
def to_json(self, labels=None):
"""Convert LogEvent object to valid JSON."""
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
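    # DateTimeEncoder is defined elsewhere in mtools; a typical encoder for
    # datetime-bearing dicts (an illustrative sketch, not the mtools source)
    # just serializes datetime values as ISO-8601 strings:
    #
    #   class DateTimeEncoder(json.JSONEncoder):
    #       def default(self, obj):
    #           if isinstance(obj, datetime):
    #               return obj.isoformat()
    #           return super().default(obj)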
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
            self._command = list(doc[u'command'].keys())[0]
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = ("[{thread}] {operation} {namespace} {payload} "
"{scanned} {yields} locks(micros) {locks} "
"{duration}".format(datetime=self.datetime,
thread=self.thread,
operation=self.operation,
namespace=self.namespace,
payload=payload, scanned=scanned,
yields=yields, locks=locks,
duration=duration))
|
merge | Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values | """Module containing examples of report builder functions and classes."""
from collections import OrderedDict
import numpy as np
def example_fn_build_report(report, pvarray):
"""Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
# Initialize the report
if report is None:
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
# Add elements to the report
if pvarray is not None:
pvrow = pvarray.pvrows[1] # use center pvrow
report['qinc_front'].append(
pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(
pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(
pvrow.back.get_param_weighted('isotropic'))
else:
# No calculation was performed, because sun was down
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report
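def _example_report_loop():
    """Hedged usage sketch (not part of pvfactors): the engine threads the
    report through the builder once per timestep; ``None`` pvarrays stand in
    for timesteps where the sun is down."""
    report = None
    for pvarray in [None, None]:  # two night-time steps, no calculation
        report = example_fn_build_report(report, pvarray)
    return report  # report['qinc_front'] == [nan, nan]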
class ExampleReportBuilder(object):
"""A class is required to build reports when running calculations with
multiprocessing because of python constraints"""
@staticmethod
def build(report, pvarray):
"""Method that will build the simulation report. Here we're using the
previously defined
        :py:func:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
return example_fn_build_report(report, pvarray)
# MASKED: merge function (lines 73-95) | @staticmethod
def merge(reports):
"""Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values
"""
report = reports[0]
# Merge only if more than 1 report
if len(reports) > 1:
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report | 73 | 95 | """Module containing examples of report builder functions and classes."""
from collections import OrderedDict
import numpy as np
def example_fn_build_report(report, pvarray):
"""Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
# Initialize the report
if report is None:
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
# Add elements to the report
if pvarray is not None:
pvrow = pvarray.pvrows[1] # use center pvrow
report['qinc_front'].append(
pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(
pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(
pvrow.back.get_param_weighted('isotropic'))
else:
# No calculation was performed, because sun was down
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report
class ExampleReportBuilder(object):
"""A class is required to build reports when running calculations with
multiprocessing because of python constraints"""
@staticmethod
def build(report, pvarray):
"""Method that will build the simulation report. Here we're using the
previously defined
        :py:func:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
return example_fn_build_report(report, pvarray)
@staticmethod
def merge(reports):
"""Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values
"""
report = reports[0]
# Merge only if more than 1 report
if len(reports) > 1:
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report
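def _example_merge_usage():
    """Hedged usage sketch (not part of pvfactors): merging two per-process
    reports produced with the builder above simply concatenates the lists."""
    r1 = {'qinc_front': [1.0, 2.0], 'qinc_back': [0.1, 0.2]}
    r2 = {'qinc_front': [3.0], 'qinc_back': [0.3]}
    merged = ExampleReportBuilder.merge([r1, r2])
    return merged  # {'qinc_front': [1.0, 2.0, 3.0], 'qinc_back': [0.1, 0.2, 0.3]}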
|
call | Applies the embedding lookup to the input tensors.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor. | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvBERT model."""
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
]
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
# MASKED: call function (lines 106-143)
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
unfold_conv_out_layer = tf.stack(
[
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
for i in range(self.conv_kernel_size)
],
axis=-1,
)
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in TFConvBertMainLayer's call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
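def _demo_unfold_sliding_window():
    """Illustrative sketch (not part of transformers) of the pad-and-stack
    "unfold" used in TFConvBertSelfAttention.call above: zero-pad the sequence
    axis, then stack k shifted slices so every position sees its k neighbours."""
    k = 3  # stands in for conv_kernel_size
    x = tf.reshape(tf.range(2 * 5 * 4, dtype=tf.float32), [2, 5, 4])  # (batch, seq, hidden)
    pad = (k - 1) // 2
    x_padded = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])  # zero-pad the sequence ends
    windows = tf.stack([x_padded[:, i:i + 5, :] for i in range(k)], axis=-1)
    return windows  # (batch, seq, hidden, k); windows[b, t, :, j] is position t + j - pad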
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
def call(self, hidden_states):
batch_size = shape_list(hidden_states)[0]
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [batch_size, -1, self.output_size])
x = tf.nn.bias_add(value=x, bias=self.bias)
return x
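def _demo_grouped_linear():
    """Illustrative sketch (not part of transformers) of the grouped-linear
    idea behind GroupedLinearLayer: split the feature axis into g groups, give
    each group its own small kernel, and concatenate -- g times fewer weights
    than a dense layer of the same width."""
    g, d_in, d_out = 2, 8, 8
    x = tf.random.normal([4, 6, d_in])                # (batch, seq, d_in)
    w = tf.random.normal([g, d_in // g, d_out // g])  # one kernel per group
    parts = tf.split(x, g, axis=-1)
    y = tf.concat([tf.einsum('bsi,io->bso', part, w[j])
                   for j, part in enumerate(parts)], axis=-1)
    return y  # (batch, seq, d_out)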
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast
        # dimension here.
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
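    # Illustrative sketch (not part of transformers) of the additive mask
    # above: 1 -> 0.0 (keep) and 0 -> -10000.0 (drop), so after the softmax
    # the masked key positions receive effectively zero attention weight.
    #
    #   mask = tf.constant([[1, 1, 0]], dtype=tf.float32)     # (batch, seq)
    #   extended = (1.0 - mask[:, None, None, :]) * -10000.0  # (batch, 1, 1, seq)
    #   probs = tf.nn.softmax(tf.zeros([1, 1, 3, 3]) + extended, axis=-1)
    #   # probs[..., -1] is ~0 for every query position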
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
    and behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all
    the tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
    the first positional argument:
    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated with the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
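def _demo_weight_tying():
    """Illustrative sketch (not part of transformers) of the weight tying in
    TFConvBertMaskedLMHead above: logits come from a matmul against the input
    embedding table (transpose_b=True), so no separate output matrix is stored."""
    vocab, dim, seq = 10, 4, 3
    emb = tf.random.normal([vocab, dim])        # shared embedding table
    hidden = tf.random.normal([1, seq, dim])
    logits = tf.matmul(tf.reshape(hidden, [-1, dim]), emb, transpose_b=True)
    return tf.reshape(logits, [1, seq, vocab])  # (batch, seq, vocab)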
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFConvBertClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
) | def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings | 106 | 143 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvBERT model."""
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
]
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x, batch_size):
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length,
        # num_attention_heads, attention_head_size], then transpose to
        # [batch_size, num_attention_heads, seq_length, attention_head_size].
        x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        # Span-based dynamic convolution: combine the depthwise-convolved keys
        # with the queries to predict a convolution kernel for every position.
        conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        # Normalize each dynamic kernel over its kernel positions.
        conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
        conv_out_layer = self.conv_out_layer(hidden_states)
        conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        # Zero-pad the sequence so every position has a full window of
        # `conv_kernel_size` neighbours, then "unfold" those windows by
        # stacking shifted slices along a new trailing axis.
        conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
        unfold_conv_out_layer = tf.stack(
            [
                tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
                for i in range(self.conv_kernel_size)
            ],
            axis=-1,
        )
        # Apply the predicted dynamic kernels to the unfolded windows.
        conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFConvBertMainLayer call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
    def call(self, hidden_states):
        batch_size = shape_list(hidden_states)[0]
        # Grouped linear map: split the features into `num_groups` chunks of size
        # `group_in_dim` and apply one kernel per group. Shapes:
        # [B, S, input_size] -> [groups, B*S, group_in_dim] -> [groups, B*S, group_out_dim]
        # -> [B, S, output_size]. With num_groups == 1 this reduces to a Dense layer.
        x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
        x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
        x = tf.transpose(x, [1, 0, 2])
        x = tf.reshape(x, [batch_size, -1, self.output_size])
        x = tf.nn.bias_add(value=x, bias=self.bias)
        return x
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
        the base class PreTrainedModel.
"""
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
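        # Worked example (illustrative): a 2D mask [[1, 1, 0]] is reshaped to
        # (1, 1, 1, 3) and becomes [[[[0.0, 0.0, -10000.0]]]], so the masked
        # position is effectively removed by the softmax.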
return extended_attention_mask
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all
    the tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
    the first positional argument:
    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
    def call(self, hidden_states):
        seq_length = shape_list(tensor=hidden_states)[1]
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        # Weight tying: project back onto the vocabulary with the transposed
        # input embedding matrix, then add the learned output bias.
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
        return hidden_states
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFConvBertClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices -
            1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above.)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
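        # Multiple-choice inputs arrive as (batch_size, num_choices, seq_length);
        # flatten the choice axis into the batch dimension so the shared encoder
        # sees ordinary 2D inputs, and reshape the per-choice logits back below.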
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
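        # qa_outputs emits two logits per token; split them into independent
        # start- and end-of-span score vectors over the sequence.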
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
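# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the public "YituTech/conv-bert-base" checkpoint and the
# ConvBertTokenizer class from `transformers` are available.
if __name__ == "__main__":
    from transformers import ConvBertTokenizer

    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("Who wrote it?", "It was written by X.", return_tensors="tf")
    outputs = model(inputs)
    # Greedily decode the most likely answer span from the two logit vectors.
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    print(tokenizer.decode(inputs["input_ids"][0][start:end + 1]))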
|
simulate_step_response | Compute the linear model response to a Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given. | import numpy as np
from numpy import (reciprocal, einsum, maximum, minimum, zeros_like,
atleast_1d, squeeze)
from scipy.linalg import eig, eigvals, matrix_balance, norm
from harold._classes import Transfer, transfer_to_state
from harold._discrete_funcs import discretize
from harold._arg_utils import _check_for_state, _check_for_state_or_transfer
__all__ = ['simulate_linear_system', 'simulate_step_response',
'simulate_impulse_response']
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
"""
Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that have as many columns as the number of
inputs are valid inputs.
t : array_like, optional
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
x0 : array_like, optional
The initial condition array. If omitted an array of zeros is assumed.
Note that Transfer models by definition assume zero initial conditions
and will raise an error.
per_channel : bool, optional
If this is set to True and if the system has multiple inputs, the
response of each input is returned individually. For example, if a
system has 4 inputs and 3 outputs then the response shape becomes
(num, p, m) instead of (num, p) where k-th slice (:, :, k) is the
response from the k-th input channel. For single input systems, this
keyword has no effect.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs.
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
Notes
-----
For Transfer models, first conversion to a state model is performed and
then the resulting model is used for computations.
"""
_check_for_state_or_transfer(sys)
# Quick initial condition checks
if x0 is not None:
if sys._isgain:
raise ValueError('Static system models can\'t have initial '
'conditions set.')
if isinstance(sys, Transfer):
raise ValueError('Transfer models can\'t have initial conditions '
'set.')
x0 = np.asarray(x0, dtype=float).squeeze()
if x0.ndim > 1:
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if sys.NumberOfStates != x0.size:
raise ValueError('The initial condition size does not match the '
'number of states of the model.')
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
n, m = sys.NumberOfStates, sys.shape[1]
is_discrete = sys.SamplingSet == 'Z'
u = np.asarray(u, dtype=float).squeeze()
if u.ndim == 1:
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
# input and time arrays are regular, move on
# Static gains are simple matrix multiplications with no x0
if sys._isgain:
if sys._isSISO:
yout = u * sys.d.squeeze()
else:
# don't bother for single inputs
if m == 1:
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = u @ sys.d.T
# Dynamic model
else:
# TODO: Add FOH discretization for funky input
# ZOH discretize the continuous system based on the time increment
if not is_discrete:
sys = discretize(sys, t[1]-t[0], method='zoh')
sample_num = len(u)
a, b, c, d = sys.matrices
# Bu and Du are constant matrices so get them ready (transposed)
M_u = np.block([b.T, d.T])
at = a.T
# Explicitly skip single inputs for per_channel
if m == 1:
per_channel = False
# Shape the response as a 3D array
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = 0. if x0 is None else x0.T
Bu = u[:, [col]] @ b.T[[col], :]
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1]
# Get the output equation for each slice of inputs
# Cx + Du
yout = np.einsum('ijk,jl->ilk', xout, c.T) + \
np.einsum('ij,jk->ikj', u, d.T)
# Combined output
else:
BDu = u @ M_u
xout = np.empty([sample_num, n], dtype=float)
xout[0] = 0. if x0 is None else x0.T
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row] = (xout[row-1] @ at) + BDu[row-1, :n]
# Now we have all the state evolution get the output equation
yout = xout @ c.T + BDu[:, n:]
return yout, t
# MASKED: simulate_step_response function (lines 157-203)
def simulate_impulse_response(sys, t=None):
"""
Compute the linear model response to a Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = 1./ts
return simulate_linear_system(sys, u=u, t=t, per_channel=1)
def _compute_tfinal_and_dt(sys, is_step=True):
"""
Helper function to estimate a final time and a sampling period for
time domain simulations. It is essentially geared towards impulse response
but is also used for step responses.
For discrete-time models, obviously dt is inherent and only tfinal is
computed.
Parameters
----------
sys : {State, Transfer}
The system to be investigated
is_step : bool
Scales the dc value by the magnitude of the nonzero mode since
integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
Default is True.
Returns
-------
tfinal : float
The final time instance for which the simulation will be performed.
dt : float
The estimated sampling period for the simulation.
Notes
-----
Evaluating only the fastest mode for dt and the slowest for tfinal often
leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
since dt will be very small and tfinal will be too large even though the
fast mode hardly ever contributes. Similarly, change the numerator to
[1, 2, 0] and the simulation would be unnecessarily long and the plot
virtually an L shape since the decay is so fast.
Instead, a modal decomposition in the time domain, hence a truncated ZIR
and ZSR, can be used such that only the modes that have a significant
effect on the time response are taken. But the sensitivity of the
eigenvalues complicates the matter since dλ = <w, dA*v> with <w,v> = 1.
Hence we can only work with simple poles with this formulation. See Golub,
Van Loan Section 7.2.2 for simple eigenvalue sensitivity about the
nonunity of <w,v>. The size of the response depends on the size of the
eigenshapes rather than the eigenvalues themselves.
"""
sqrt_eps = np.sqrt(np.spacing(1.))
min_points = 100 # min number of points
min_points_z = 20 # min number of points
max_points = 10000 # max number of points
max_points_z = 75000 # max number of points for discrete models
default_tfinal = 5 # Default simulation horizon
total_cycles = 5 # number of cycles for oscillating modes
pts_per_cycle = 25  # Number of points to divide a period of oscillation
log_decay_percent = np.log(100) # Factor of reduction for real pole decays
# if a static model is given, don't bother with checks
if sys._isgain:
if sys._isdiscrete:
return sys._dt*min_points_z, sys._dt
else:
return default_tfinal, default_tfinal / min_points
if sys._isdiscrete:
# System already has sampling fixed hence we can't fall into the same
# trap mentioned above. Just get nonintegrating slow modes together
# with the damping.
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
# Array Masks
# unstable
m_u = (np.abs(p) >= 1 + sqrt_eps)
p_u, p = p[m_u], p[~m_u]
if p_u.size > 0:
m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
tfinal = max(tfinal, t_emp)
# zero - negligible effect on tfinal
m_z = np.abs(p) < sqrt_eps
p = p[~m_z]
# Negative reals - treated as oscillatory modes
m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
p_nr, p = p[m_nr], p[~m_nr]
if p_nr.size > 0:
t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
tfinal = max(tfinal, t_emp)
# discrete integrators
m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
p_int, p = p[m_int], p[~m_int]
# pure oscillatory modes
m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
p_w, p = p[m_w], p[~m_w]
if p_w.size > 0:
t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
tfinal = max(tfinal, t_emp)
if p.size > 0:
t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
tfinal = max(tfinal, t_emp)
if p_int.size > 0:
tfinal = tfinal * 5
# Make tfinal an integer multiple of dt
num_samples = tfinal // dt
if num_samples > max_points_z:
tfinal = dt * max_points_z
else:
tfinal = dt * num_samples
return tfinal, dt
# Improve conditioning via balancing and zeroing tiny entries
# See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
b, (sca, perm) = matrix_balance(sys.a, separate=True)
p, l, r = eig(b, left=True, right=True)
# Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
# G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1e12, eig_sens)
# Tolerances
p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
# Incorporate balancing to outer factors
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
w, v = sys.c @ r, l.T.conj() @ sys.b
origin = False
# Computing the "size" of the response of each simple mode
wn = np.abs(p)
if np.any(wn == 0.):
origin = True
dc = zeros_like(p, dtype=float)
# well-conditioned nonzero poles, np.abs just in case
ok = np.abs(eig_sens) <= 1/sqrt_eps
# the averaged t→∞ response of each simple λ on each i/o channel
# See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
# R/L eigenvector dependent)
dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
dc[wn == 0.] = 0.
# double the oscillating mode magnitude for the conjugate
dc[p.imag != 0.] *= 2
# Now get rid of noncontributing integrators and simple modes if any
relevance = (dc > 0.1*dc.max()) | ~ok
psub = p[relevance]
wnsub = wn[relevance]
tfinal, dt = [], []
ints = wnsub == 0.
iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
# Pure imaginary?
if np.any(iw):
tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
# The rest ~ts = log(%ss value) / exp(Re(λ)t)
texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
tfinal += texp_mode.tolist()
dt += minimum(texp_mode / 50,
(2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()
# All integrators?
if len(tfinal) == 0:
return default_tfinal*5, default_tfinal*5/min_points
tfinal = np.max(tfinal)*(5 if origin else 1)
dt = np.min(dt)
dt = tfinal / max_points if tfinal // dt > max_points else dt
tfinal = dt * min_points if tfinal // dt < min_points else tfinal
return tfinal, dt
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
"""
Helper function to validate the input arguments for simulate_linear_system
"""
# Discrete models can omit t array, make one here for convenience
if t is None:
if not isdiscrete:
raise ValueError('Continuous time models need an evenly spaced '
'time sequence from which the sampling period '
'will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, (u_samples-1)*dt, num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if t.ndim != 1:
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.:
raise ValueError('Time array should be equally spaced and '
'increasing.')
if isdiscrete and not np.isclose(dt, t_diff[0]):
raise ValueError('Time array increment {} is not equal to the'
' model sampling period {}.'.format(t_diff[0],
dt))
if u.size < 1:
raise ValueError('The input array should at least have one point.')
# First dimension is always # of samples
if len(u) != len(t):
raise ValueError('The input and time arrays should have the same'
' length. t: {} vs. u: {}'.format(t.shape,
u.shape))
if u.shape[1] != m:
raise ValueError('Number of input columns ({}) doesn\'t match the number'
' of inputs ({}) of the given model.'
''.format(u.shape[1], m))
return t
def _check_custom_time_input(t):
"""
Helper function for simple and rather expensive checks for sanity
"""
t = atleast_1d(t)
if t.ndim > 1:
t = squeeze(t)
if t.ndim > 1:
raise ValueError('Time array should be a 1D array but has '
'{} nontrivial dimensions'.format(t.ndim))
if t.size < 2:
raise ValueError('Time array should have at least two data points.')
dt = t[1] - t[0]
if dt <= 0.:
raise ValueError('The time increment dt must be positive; '
'Difference of the first two samples t1 - t0 = {}'
''.format(dt))
# np.diff is somewhat slower than the diff of the views
if not np.allclose(t[1:] - t[:-1], dt):
raise ValueError('Supplied time array is not numerically equally '
'spaced (checked via numpy.allclose).')
return t, dt | def simulate_step_response(sys, t=None):
"""
Compute the linear model response to a Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=True) | 157 | 203 | import numpy as np
from numpy import (reciprocal, einsum, maximum, minimum, zeros_like,
atleast_1d, squeeze)
from scipy.linalg import eig, eigvals, matrix_balance, norm
from harold._classes import Transfer, transfer_to_state
from harold._discrete_funcs import discretize
from harold._arg_utils import _check_for_state, _check_for_state_or_transfer
__all__ = ['simulate_linear_system', 'simulate_step_response',
'simulate_impulse_response']
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
"""
Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that have as many columns as the number of
inputs are valid inputs.
t : array_like, optional
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
x0 : array_like, optional
The initial condition array. If omitted an array of zeros is assumed.
Note that Transfer models by definition assume zero initial conditions
and will raise an error.
per_channel : bool, optional
If this is set to True and if the system has multiple inputs, the
response of each input is returned individually. For example, if a
system has 4 inputs and 3 outputs then the response shape becomes
(num, p, m) instead of (num, p) where k-th slice (:, :, k) is the
response from the k-th input channel. For single input systems, this
keyword has no effect.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs.
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
Notes
-----
For Transfer models, first conversion to a state model is performed and
then the resulting model is used for computations.
"""
_check_for_state_or_transfer(sys)
# Quick initial condition checks
if x0 is not None:
if sys._isgain:
raise ValueError('Static system models can\'t have initial '
'conditions set.')
if isinstance(sys, Transfer):
raise ValueError('Transfer models can\'t have initial conditions '
'set.')
x0 = np.asarray(x0, dtype=float).squeeze()
if x0.ndim > 1:
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if sys.NumberOfStates != x0.size:
raise ValueError('The initial condition size does not match the '
'number of states of the model.')
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
n, m = sys.NumberOfStates, sys.shape[1]
is_discrete = sys.SamplingSet == 'Z'
u = np.asarray(u, dtype=float).squeeze()
if u.ndim == 1:
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
# input and time arrays are regular, move on
# Static gains are simple matrix multiplications with no x0
if sys._isgain:
if sys._isSISO:
yout = u * sys.d.squeeze()
else:
# don't bother for single inputs
if m == 1:
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = u @ sys.d.T
# Dynamic model
else:
# TODO: Add FOH discretization for funky input
# ZOH discretize the continuous system based on the time increment
if not is_discrete:
sys = discretize(sys, t[1]-t[0], method='zoh')
sample_num = len(u)
a, b, c, d = sys.matrices
# Bu and Du are constant matrices so get them ready (transposed)
M_u = np.block([b.T, d.T])
at = a.T
# Explicitly skip single inputs for per_channel
if m == 1:
per_channel = False
# Shape the response as a 3D array
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = 0. if x0 is None else x0.T
Bu = u[:, [col]] @ b.T[[col], :]
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1]
# Get the output equation for each slice of inputs
# Cx + Du
yout = np.einsum('ijk,jl->ilk', xout, c.T) + \
np.einsum('ij,jk->ikj', u, d.T)
# Combined output
else:
BDu = u @ M_u
xout = np.empty([sample_num, n], dtype=float)
xout[0] = 0. if x0 is None else x0.T
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row] = (xout[row-1] @ at) + BDu[row-1, :n]
# Now we have all the state evolution get the output equation
yout = xout @ c.T + BDu[:, n:]
return yout, t
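# Hedged usage sketch (added; `_demo_linear_simulation` is not part of
# harold). It drives the SISO model 1/(s^2 + 2s + 1) with a sine input on an
# evenly spaced grid; for a SISO system `yout` comes back as a 1D array.
def _demo_linear_simulation():
    G = Transfer([1.], [1., 2., 1.])       # 1/(s + 1)^2
    t = np.linspace(0., 10., 501)          # dt = 0.02
    u = np.sin(2 * np.pi * 0.5 * t)        # 0.5 Hz sine input
    yout, tout = simulate_linear_system(G, u, t=t)
    return yout, tout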
def simulate_step_response(sys, t=None):
"""
Compute the linear model response to a Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=True)
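# Hedged usage sketch (added; `_demo_step_response` is not part of harold):
# step response of a lightly damped second-order system, with the time grid
# generated automatically from the poles of the model.
def _demo_step_response():
    G = Transfer([1.], [1., 0.4, 1.])      # wn = 1 rad/s, zeta = 0.2
    yout, tout = simulate_step_response(G)
    return yout, tout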
def simulate_impulse_response(sys, t=None):
"""
Compute the linear model response to a Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = 1./ts
return simulate_linear_system(sys, u=u, t=t, per_channel=True)
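# Hedged usage sketch (added; `_demo_impulse_response` is not part of
# harold). With a user-supplied grid, the first input sample becomes 1/dt
# internally to approximate the Dirac impulse.
def _demo_impulse_response():
    G = Transfer([1.], [1., 0.4, 1.])
    t = np.linspace(0., 20., 2001)         # dt = 0.01
    yout, tout = simulate_impulse_response(G, t=t)
    return yout, tout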
def _compute_tfinal_and_dt(sys, is_step=True):
"""
Helper function to estimate a final time and a sampling period for
time domain simulations. It is essentially geared towards impulse response
but is also used for step responses.
For discrete-time models, obviously dt is inherent and only tfinal is
computed.
Parameters
----------
sys : {State, Transfer}
The system to be investigated
is_step : bool
Scales the dc value by the magnitude of the nonzero mode since
integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
Default is True.
Returns
-------
tfinal : float
The final time instance for which the simulation will be performed.
dt : float
The estimated sampling period for the simulation.
Notes
-----
Evaluating only the fastest mode for dt and the slowest for tfinal often
leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
since dt will be very small and tfinal will be too large even though the
fast mode hardly ever contributes. Similarly, change the numerator to
[1, 2, 0] and the simulation would be unnecessarily long and the plot
virtually an L shape since the decay is so fast.
Instead, a modal decomposition in the time domain, hence a truncated ZIR
and ZSR, can be used such that only the modes that have a significant
effect on the time response are taken. But the sensitivity of the
eigenvalues complicates the matter since dλ = <w, dA*v> with <w,v> = 1.
Hence we can only work with simple poles with this formulation. See Golub,
Van Loan Section 7.2.2 for simple eigenvalue sensitivity about the
nonunity of <w,v>. The size of the response depends on the size of the
eigenshapes rather than the eigenvalues themselves.
"""
sqrt_eps = np.sqrt(np.spacing(1.))
min_points = 100 # min number of points
min_points_z = 20 # min number of points
max_points = 10000 # max number of points
max_points_z = 75000 # max number of points for discrete models
default_tfinal = 5 # Default simulation horizon
total_cycles = 5 # number of cycles for oscillating modes
pts_per_cycle = 25  # Number of points to divide a period of oscillation
log_decay_percent = np.log(100) # Factor of reduction for real pole decays
# if a static model is given, don't bother with checks
if sys._isgain:
if sys._isdiscrete:
return sys._dt*min_points_z, sys._dt
else:
return default_tfinal, default_tfinal / min_points
if sys._isdiscrete:
# System already has sampling fixed hence we can't fall into the same
# trap mentioned above. Just get nonintegrating slow modes together
# with the damping.
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
# Array Masks
# unstable
m_u = (np.abs(p) >= 1 + sqrt_eps)
p_u, p = p[m_u], p[~m_u]
if p_u.size > 0:
m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
tfinal = max(tfinal, t_emp)
# zero - negligible effect on tfinal
m_z = np.abs(p) < sqrt_eps
p = p[~m_z]
# Negative reals - treated as oscillatory modes
m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
p_nr, p = p[m_nr], p[~m_nr]
if p_nr.size > 0:
t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
tfinal = max(tfinal, t_emp)
# discrete integrators
m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
p_int, p = p[m_int], p[~m_int]
# pure oscillatory modes
m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
p_w, p = p[m_w], p[~m_w]
if p_w.size > 0:
t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
tfinal = max(tfinal, t_emp)
if p.size > 0:
t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
tfinal = max(tfinal, t_emp)
if p_int.size > 0:
tfinal = tfinal * 5
# Make tfinal an integer multiple of dt
num_samples = tfinal // dt
if num_samples > max_points_z:
tfinal = dt * max_points_z
else:
tfinal = dt * num_samples
return tfinal, dt
# Improve conditioning via balancing and zeroing tiny entries
# See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
b, (sca, perm) = matrix_balance(sys.a, separate=True)
p, l, r = eig(b, left=True, right=True)
# Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
# G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1e12, eig_sens)
# Tolerances
p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
# Incorporate balancing to outer factors
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
w, v = sys.c @ r, l.T.conj() @ sys.b
origin = False
# Computing the "size" of the response of each simple mode
wn = np.abs(p)
if np.any(wn == 0.):
origin = True
dc = zeros_like(p, dtype=float)
# well-conditioned nonzero poles, np.abs just in case
ok = np.abs(eig_sens) <= 1/sqrt_eps
# the averaged t→∞ response of each simple λ on each i/o channel
# See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
# R/L eigenvector dependent)
dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
dc[wn == 0.] = 0.
# double the oscillating mode magnitude for the conjugate
dc[p.imag != 0.] *= 2
# Now get rid of noncontributing integrators and simple modes if any
relevance = (dc > 0.1*dc.max()) | ~ok
psub = p[relevance]
wnsub = wn[relevance]
tfinal, dt = [], []
ints = wnsub == 0.
iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
# Pure imaginary?
if np.any(iw):
tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
# The rest ~ts = log(%ss value) / exp(Re(λ)t)
texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
tfinal += texp_mode.tolist()
dt += minimum(texp_mode / 50,
(2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()
# All integrators?
if len(tfinal) == 0:
return default_tfinal*5, default_tfinal*5/min_points
tfinal = np.max(tfinal)*(5 if origin else 1)
dt = np.min(dt)
dt = tfinal / max_points if tfinal // dt > max_points else dt
tfinal = dt * min_points if tfinal // dt < min_points else tfinal
return tfinal, dt
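# Hedged numeric illustration of the Notes above (added; not part of harold).
# Transfer(1, [1, 1001, 1000]) has poles at -1 and -1000; a naive rule that
# samples the fastest mode and waits out the slowest would need roughly
# 230000 points, which the modal "dc size" screening above avoids.
def _demo_naive_sampling_cost():
    poles = np.roots([1., 1001., 1000.])              # array([-1000., -1.])
    naive_dt = 1. / (50. * np.abs(poles).max())       # 2e-05
    naive_tfinal = np.log(100) / np.abs(poles).min()  # ~4.6 for a 99% decay
    return int(naive_tfinal / naive_dt)               # ~230258 samples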
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
"""
Helper function to validate the input arguments for simulate_linear_system
"""
# Discrete models can omit t array, make one here for convenience
if t is None:
if not isdiscrete:
raise ValueError('Continuous time models need an evenly spaced '
'time sequence from which the sampling period '
'will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, (u_samples-1)*dt, num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if t.ndim != 1:
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.:
raise ValueError('Time array should be equally spaced and '
'increasing.')
if isdiscrete and not np.isclose(dt, t_diff[0]):
raise ValueError('Time array increment {} is not equal to the'
' model sampling period {}.'.format(t_diff[0],
dt))
if u.size < 1:
raise ValueError('The input array should at least have one point.')
# First dimension is always # of samples
if len(u) != len(t):
raise ValueError('The input and time arrays should have the same'
' length. t: {} vs. u: {}'.format(t.shape,
u.shape))
if u.shape[1] != m:
raise ValueError('Number of input columns ({}) doesn\'t match the number'
' of inputs ({}) of the given model.'
''.format(u.shape[1], m))
return t
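# Hedged illustration (added; not part of harold): for discrete models the
# time grid may be omitted and is reconstructed from the sampling period.
def _demo_u_t_check():
    u = np.ones([5, 1])
    t = _check_u_and_t_for_simulation(1, 0.1, u, None, True)
    return t                               # array([0. , 0.1, 0.2, 0.3, 0.4])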
def _check_custom_time_input(t):
"""
Helper function for simple and rather expensive checks for sanity
"""
t = atleast_1d(t)
if t.ndim > 1:
t = squeeze(t)
if t.ndim > 1:
raise ValueError('Time array should be a 1D array but has '
'{} nontrivial dimensions'.format(t.ndim))
if t.size < 2:
raise ValueError('Time array should have at least two data points.')
dt = t[1] - t[0]
if dt <= 0.:
raise ValueError('The time increment dt must be positive; '
'Difference of the first two samples t1 - t0 = {}'
''.format(dt))
# np.diff is somewhat slower than the diff of the views
if not np.allclose(t[1:] - t[:-1], dt):
raise ValueError('Supplied time array is not numerically equally '
'spaced (checked via numpy.allclose).')
return t, dt
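# Hedged illustration (added; not part of harold): an evenly spaced array
# passes and yields its increment, while uneven spacing raises a ValueError.
def _demo_time_check():
    t, dt = _check_custom_time_input(np.linspace(0., 1., 101))  # dt = 0.01
    try:
        _check_custom_time_input([0., 0.1, 0.3])                # uneven
    except ValueError:
        pass
    return t, dt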
|
simulate_impulse_response | Compute the linear model response to a Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given. | import numpy as np
from numpy import (reciprocal, einsum, maximum, minimum, zeros_like,
atleast_1d, squeeze)
from scipy.linalg import eig, eigvals, matrix_balance, norm
from harold._classes import Transfer, transfer_to_state
from harold._discrete_funcs import discretize
from harold._arg_utils import _check_for_state, _check_for_state_or_transfer
__all__ = ['simulate_linear_system', 'simulate_step_response',
'simulate_impulse_response']
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
"""
Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that have as many columns as the number of
inputs are valid inputs.
t : array_like, optional
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
x0 : array_like, optional
The initial condition array. If omitted an array of zeros is assumed.
Note that Transfer models by definition assume zero initial conditions
and will raise an error.
per_channel : bool, optional
If this is set to True and if the system has multiple inputs, the
response of each input is returned individually. For example, if a
system has 4 inputs and 3 outputs then the response shape becomes
(num, p, m) instead of (num, p) where k-th slice (:, :, k) is the
response from the k-th input channel. For single input systems, this
keyword has no effect.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs.
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
Notes
-----
For Transfer models, first conversion to a state model is performed and
then the resulting model is used for computations.
"""
_check_for_state_or_transfer(sys)
# Quick initial condition checks
if x0 is not None:
if sys._isgain:
raise ValueError('Static system models can\'t have initial '
'conditions set.')
if isinstance(sys, Transfer):
raise ValueError('Transfer models can\'t have initial conditions '
'set.')
x0 = np.asarray(x0, dtype=float).squeeze()
if x0.ndim > 1:
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if sys.NumberOfStates != x0.size:
raise ValueError('The initial condition size does not match the '
'number of states of the model.')
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
n, m = sys.NumberOfStates, sys.shape[1]
is_discrete = sys.SamplingSet == 'Z'
u = np.asarray(u, dtype=float).squeeze()
if u.ndim == 1:
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
# input and time arrays are regular, move on
# Static gains are simple matrix multiplications with no x0
if sys._isgain:
if sys._isSISO:
yout = u * sys.d.squeeze()
else:
# don't bother for single inputs
if m == 1:
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = u @ sys.d.T
# Dynamic model
else:
# TODO: Add FOH discretization for funky input
# ZOH discretize the continuous system based on the time increment
if not is_discrete:
sys = discretize(sys, t[1]-t[0], method='zoh')
sample_num = len(u)
a, b, c, d = sys.matrices
# Bu and Du are constant matrices so get them ready (transposed)
M_u = np.block([b.T, d.T])
at = a.T
# Explicitly skip single inputs for per_channel
if m == 1:
per_channel = False
# Shape the response as a 3D array
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = 0. if x0 is None else x0.T
Bu = u[:, [col]] @ b.T[[col], :]
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1]
# Get the output equation for each slice of inputs
# Cx + Du
yout = np.einsum('ijk,jl->ilk', xout, c.T) + \
np.einsum('ij,jk->ikj', u, d.T)
# Combined output
else:
BDu = u @ M_u
xout = np.empty([sample_num, n], dtype=float)
xout[0] = 0. if x0 is None else x0.T
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row] = (xout[row-1] @ at) + BDu[row-1, :n]
# Now we have all the state evolution get the output equation
yout = xout @ c.T + BDu[:, n:]
return yout, t
def simulate_step_response(sys, t=None):
"""
Compute the linear model response to a Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=True)
# MASKED: simulate_impulse_response function (lines 206-254)
def _compute_tfinal_and_dt(sys, is_step=True):
"""
Helper function to estimate a final time and a sampling period for
time domain simulations. It is essentially geared towards impulse response
but is also used for step responses.
For discrete-time models, obviously dt is inherent and only tfinal is
computed.
Parameters
----------
sys : {State, Transfer}
The system to be investigated
is_step : bool
Scales the dc value by the magnitude of the nonzero mode since
integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
Default is True.
Returns
-------
tfinal : float
The final time instance for which the simulation will be performed.
dt : float
The estimated sampling period for the simulation.
Notes
-----
Evaluating only the fastest mode for dt and the slowest for tfinal often
leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
since dt will be very small and tfinal will be too large even though the
fast mode hardly ever contributes. Similarly, change the numerator to
[1, 2, 0] and the simulation would be unnecessarily long and the plot
virtually an L shape since the decay is so fast.
Instead, a modal decomposition in the time domain, hence a truncated ZIR
and ZSR, can be used such that only the modes that have a significant
effect on the time response are taken. But the sensitivity of the
eigenvalues complicates the matter since dλ = <w, dA*v> with <w,v> = 1.
Hence we can only work with simple poles with this formulation. See Golub,
Van Loan Section 7.2.2 for simple eigenvalue sensitivity about the
nonunity of <w,v>. The size of the response depends on the size of the
eigenshapes rather than the eigenvalues themselves.
"""
sqrt_eps = np.sqrt(np.spacing(1.))
min_points = 100 # min number of points
min_points_z = 20 # min number of points
max_points = 10000 # max number of points
max_points_z = 75000 # max number of points for discrete models
default_tfinal = 5 # Default simulation horizon
total_cycles = 5 # number of cycles for oscillating modes
pts_per_cycle = 25  # Number of points to divide a period of oscillation
log_decay_percent = np.log(100) # Factor of reduction for real pole decays
# if a static model is given, don't bother with checks
if sys._isgain:
if sys._isdiscrete:
return sys._dt*min_points_z, sys._dt
else:
return default_tfinal, default_tfinal / min_points
if sys._isdiscrete:
# System already has sampling fixed hence we can't fall into the same
# trap mentioned above. Just get nonintegrating slow modes together
# with the damping.
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
# Array Masks
# unstable
m_u = (np.abs(p) >= 1 + sqrt_eps)
p_u, p = p[m_u], p[~m_u]
if p_u.size > 0:
m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
tfinal = max(tfinal, t_emp)
# zero - negligible effect on tfinal
m_z = np.abs(p) < sqrt_eps
p = p[~m_z]
# Negative reals - treated as oscillatory modes
m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
p_nr, p = p[m_nr], p[~m_nr]
if p_nr.size > 0:
t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
tfinal = max(tfinal, t_emp)
# discrete integrators
m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
p_int, p = p[m_int], p[~m_int]
# pure oscillatory modes
m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
p_w, p = p[m_w], p[~m_w]
if p_w.size > 0:
t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
tfinal = max(tfinal, t_emp)
if p.size > 0:
t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
tfinal = max(tfinal, t_emp)
if p_int.size > 0:
tfinal = tfinal * 5
# Make tfinal an integer multiple of dt
num_samples = tfinal // dt
if num_samples > max_points_z:
tfinal = dt * max_points_z
else:
tfinal = dt * num_samples
return tfinal, dt
# Improve conditioning via balancing and zeroing tiny entries
# See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
b, (sca, perm) = matrix_balance(sys.a, separate=True)
p, l, r = eig(b, left=True, right=True)
# Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
# G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1e12, eig_sens)
# Tolerances
p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
# Incorporate balancing to outer factors
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
w, v = sys.c @ r, l.T.conj() @ sys.b
origin = False
# Computing the "size" of the response of each simple mode
wn = np.abs(p)
if np.any(wn == 0.):
origin = True
dc = zeros_like(p, dtype=float)
# well-conditioned nonzero poles, np.abs just in case
ok = np.abs(eig_sens) <= 1/sqrt_eps
# the averaged t→∞ response of each simple λ on each i/o channel
# See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
# R/L eigenvector dependent)
dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
dc[wn == 0.] = 0.
# double the oscillating mode magnitude for the conjugate
dc[p.imag != 0.] *= 2
# Now get rid of noncontributing integrators and simple modes if any
relevance = (dc > 0.1*dc.max()) | ~ok
psub = p[relevance]
wnsub = wn[relevance]
tfinal, dt = [], []
ints = wnsub == 0.
iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
# Pure imaginary?
if np.any(iw):
tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
# The rest ~ts = log(%ss value) / exp(Re(λ)t)
texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
tfinal += texp_mode.tolist()
dt += minimum(texp_mode / 50,
(2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()
# All integrators?
if len(tfinal) == 0:
return default_tfinal*5, default_tfinal*5/min_points
tfinal = np.max(tfinal)*(5 if origin else 1)
dt = np.min(dt)
dt = tfinal / max_points if tfinal // dt > max_points else dt
tfinal = dt * min_points if tfinal // dt < min_points else tfinal
return tfinal, dt
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
"""
Helper function to validate the input arguments for simulate_linear_system
"""
# Discrete models can omit t array, make one here for convenience
if t is None:
if not isdiscrete:
raise ValueError('Continuous time models need an evenly spaced '
'time sequence from which the sampling period '
'will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, (u_samples-1)*dt, num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if t.ndim != 1:
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.:
raise ValueError('Time array should be equally spaced and '
'increasing.')
if isdiscrete and not np.isclose(dt, t_diff[0]):
raise ValueError('Time array increment {} is not equal to the'
' model sampling period {}.'.format(t_diff[0],
dt))
if u.size < 1:
raise ValueError('The input array should at least have one point.')
# First dimension is always # of samples
if len(u) != len(t):
raise ValueError('The input and time arrays should have the same'
' length. t: {} vs. u: {}'.format(t.shape,
u.shape))
if u.shape[1] != m:
raise ValueError('Number of input columns ({}) doesn\'t match the number'
' of inputs ({}) of the given model.'
''.format(u.shape[1], m))
return t
def _check_custom_time_input(t):
"""
Helper function for simple and rather expensive checks for sanity
"""
t = atleast_1d(t)
if t.ndim > 1:
t = squeeze(t)
if t.ndim > 1:
raise ValueError('Time array should be a 1D array but has '
'{} nontrivial dimensions'.format(t.ndim))
if t.size < 2:
raise ValueError('Time array should have at least two data points.')
dt = t[1] - t[0]
if dt <= 0.:
raise ValueError('The time increment dt must be positive; '
'Difference of the first two samples t1 - t0 = {}'
''.format(dt))
# np.diff is somewhat slower than the diff of the views
if not np.allclose(t[1:] - t[:-1], dt):
raise ValueError('Supplied time array is not numerically equally '
'spaced (checked via numpy.allclose).')
return t, dt | def simulate_impulse_response(sys, t=None):
"""
Compute the linear model response to a Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = 1./ts
return simulate_linear_system(sys, u=u, t=t, per_channel=True) | 206 | 254 | import numpy as np
from numpy import (reciprocal, einsum, maximum, minimum, zeros_like,
atleast_1d, squeeze)
from scipy.linalg import eig, eigvals, matrix_balance, norm
from harold._classes import Transfer, transfer_to_state
from harold._discrete_funcs import discretize
from harold._arg_utils import _check_for_state, _check_for_state_or_transfer
__all__ = ['simulate_linear_system', 'simulate_step_response',
'simulate_impulse_response']
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
"""
Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that have as many columns as the number of
inputs are valid inputs.
t : array_like, optional
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
x0 : array_like, optional
The initial condition array. If omitted an array of zeros is assumed.
Note that Transfer models by definition assume zero initial conditions
and will raise an error.
per_channel : bool, optional
If this is set to True and if the system has multiple inputs, the
response of each input is returned individually. For example, if a
system has 4 inputs and 3 outputs then the response shape becomes
(num, p, m) instead of (num, p) where k-th slice (:, :, k) is the
response from the k-th input channel. For single input systems, this
keyword has no effect.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs.
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
Notes
-----
For Transfer models, first conversion to a state model is performed and
then the resulting model is used for computations.
"""
_check_for_state_or_transfer(sys)
# Quick initial condition checks
if x0 is not None:
if sys._isgain:
raise ValueError('Static system models can\'t have initial '
'conditions set.')
if isinstance(sys, Transfer):
raise ValueError('Transfer models can\'t have initial conditions '
'set.')
x0 = np.asarray(x0, dtype=float).squeeze()
if x0.ndim > 1:
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if sys.NumberOfStates != x0.size:
raise ValueError('The initial condition size does not match the '
'number of states of the model.')
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
n, m = sys.NumberOfStates, sys.shape[1]
is_discrete = sys.SamplingSet == 'Z'
u = np.asarray(u, dtype=float).squeeze()
if u.ndim == 1:
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
# input and time arrays are regular, move on
# Static gains are simple matrix multiplications with no x0
if sys._isgain:
if sys._isSISO:
yout = u * sys.d.squeeze()
else:
# don't bother for single inputs
if m == 1:
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = u @ sys.d.T
# Dynamic model
else:
# TODO: Add FOH discretization for funky input
# ZOH discretize the continuous system based on the time increment
if not is_discrete:
sys = discretize(sys, t[1]-t[0], method='zoh')
sample_num = len(u)
a, b, c, d = sys.matrices
# Bu and Du are constant matrices so get them ready (transposed)
M_u = np.block([b.T, d.T])
at = a.T
# Explicitly skip single inputs for per_channel
if m == 1:
per_channel = False
# Shape the response as a 3D array
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = 0. if x0 is None else x0.T
Bu = u[:, [col]] @ b.T[[col], :]
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1]
# Get the output equation for each slice of inputs
# Cx + Du
yout = np.einsum('ijk,jl->ilk', xout, c.T) + \
np.einsum('ij,jk->ikj', u, d.T)
# Combined output
else:
BDu = u @ M_u
xout = np.empty([sample_num, n], dtype=float)
xout[0] = 0. if x0 is None else x0.T
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row] = (xout[row-1] @ at) + BDu[row-1, :n]
# Now we have all the state evolution get the output equation
yout = xout @ c.T + BDu[:, n:]
return yout, t
def simulate_step_response(sys, t=None):
"""
Compute the linear model response to a Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=True)
def simulate_impulse_response(sys, t=None):
"""
Compute the linear model response to a Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced, otherwise an error is raised. For
discrete-time models, increments different from the sampling period also
raise an error. On the other hand, for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs, the
array is a 3D array with shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
"""
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = 1./ts
return simulate_linear_system(sys, u=u, t=t, per_channel=True)
def _compute_tfinal_and_dt(sys, is_step=True):
"""
Helper function to estimate a final time and a sampling period for
time domain simulations. It is essentially geared towards impulse response
but is also used for step responses.
For discrete-time models, obviously dt is inherent and only tfinal is
computed.
Parameters
----------
sys : {State, Transfer}
The system to be investigated
is_step : bool
Scales the dc value by the magnitude of the nonzero mode since
integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
Default is True.
Returns
-------
tfinal : float
The final time instance for which the simulation will be performed.
dt : float
The estimated sampling period for the simulation.
Notes
-----
    Just evaluating the fastest mode for dt and the slowest for tfinal often
    leads to unnecessarily bloated sampling (e.g., Transfer(1, [1, 1001, 1000]))
    since dt will be very small and tfinal very large even though the fast
    mode hardly contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long while the plot is virtually
    an L shape since the decay is so fast.
    Instead, a modal decomposition in the time domain, hence a truncated ZIR
    and ZSR, can be used such that only the modes that have a significant
    effect on the time response are kept. But the sensitivity of the
    eigenvalues complicates the matter since dλ = <w, dA*v> with <w,v> = 1.
    Hence we can only work with simple poles in this formulation. See Golub,
    Van Loan Section 7.2.2 for simple eigenvalue sensitivity and the nonunity
    of <w,v>. The size of the response depends on the size of the eigenshapes
    rather than the eigenvalues themselves.
"""
sqrt_eps = np.sqrt(np.spacing(1.))
min_points = 100 # min number of points
min_points_z = 20 # min number of points
max_points = 10000 # max number of points
max_points_z = 75000 # max number of points for discrete models
default_tfinal = 5 # Default simulation horizon
total_cycles = 5 # number of cycles for oscillating modes
pts_per_cycle = 25 # Number of points divide a period of oscillation
log_decay_percent = np.log(100) # Factor of reduction for real pole decays
# if a static model is given, don't bother with checks
if sys._isgain:
if sys._isdiscrete:
return sys._dt*min_points_z, sys._dt
else:
return default_tfinal, default_tfinal / min_points
if sys._isdiscrete:
# System already has sampling fixed hence we can't fall into the same
# trap mentioned above. Just get nonintegrating slow modes together
# with the damping.
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
# Array Masks
# unstable
m_u = (np.abs(p) >= 1 + sqrt_eps)
p_u, p = p[m_u], p[~m_u]
if p_u.size > 0:
m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
tfinal = max(tfinal, t_emp)
# zero - negligible effect on tfinal
m_z = np.abs(p) < sqrt_eps
p = p[~m_z]
        # Negative reals - treated as oscillatory modes
m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
p_nr, p = p[m_nr], p[~m_nr]
if p_nr.size > 0:
t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
tfinal = max(tfinal, t_emp)
# discrete integrators
m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
p_int, p = p[m_int], p[~m_int]
# pure oscillatory modes
m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
p_w, p = p[m_w], p[~m_w]
if p_w.size > 0:
t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
tfinal = max(tfinal, t_emp)
if p.size > 0:
t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
tfinal = max(tfinal, t_emp)
if p_int.size > 0:
tfinal = tfinal * 5
# Make tfinal an integer multiple of dt
num_samples = tfinal // dt
if num_samples > max_points_z:
tfinal = dt * max_points_z
else:
tfinal = dt * num_samples
return tfinal, dt
# Improve conditioning via balancing and zeroing tiny entries
# See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
b, (sca, perm) = matrix_balance(sys.a, separate=True)
p, l, r = eig(b, left=True, right=True)
# Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
# G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1e12, eig_sens)
# Tolerances
p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
# Incorporate balancing to outer factors
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
w, v = sys.c @ r, l.T.conj() @ sys.b
origin = False
# Computing the "size" of the response of each simple mode
wn = np.abs(p)
if np.any(wn == 0.):
origin = True
dc = zeros_like(p, dtype=float)
# well-conditioned nonzero poles, np.abs just in case
ok = np.abs(eig_sens) <= 1/sqrt_eps
# the averaged t→∞ response of each simple λ on each i/o channel
# See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
# R/L eigenvector dependent)
dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
dc[wn == 0.] = 0.
# double the oscillating mode magnitude for the conjugate
dc[p.imag != 0.] *= 2
# Now get rid of noncontributing integrators and simple modes if any
relevance = (dc > 0.1*dc.max()) | ~ok
psub = p[relevance]
wnsub = wn[relevance]
tfinal, dt = [], []
ints = wnsub == 0.
iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
# Pure imaginary?
if np.any(iw):
tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
# The rest ~ts = log(%ss value) / exp(Re(λ)t)
texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
tfinal += texp_mode.tolist()
dt += minimum(texp_mode / 50,
(2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()
# All integrators?
if len(tfinal) == 0:
return default_tfinal*5, default_tfinal*5/min_points
tfinal = np.max(tfinal)*(5 if origin else 1)
dt = np.min(dt)
dt = tfinal / max_points if tfinal // dt > max_points else dt
tfinal = dt * min_points if tfinal // dt < min_points else tfinal
return tfinal, dt
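# A minimal sketch probing the heuristic with the Transfer(1, [1, 1001, 1000])
# example from the Notes above, whose poles -1 and -1000 are three decades
# apart. Assumes Transfer and transfer_to_state from this module.
def _example_tfinal_dt_heuristic():
    G = transfer_to_state(Transfer(1, [1, 1001, 1000]))
    tfinal, dt = _compute_tfinal_and_dt(G)
    # The relevance test above filters out the barely contributing fast
    # mode, so dt does not have to resolve the -1000 pole.
    return tfinal, dt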
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
"""
Helper function to validate the input arguments for simulate_linear_system
"""
# Discrete models can omit t array, make one here for convenience
if t is None:
if not isdiscrete:
raise ValueError('Continuous time models need an evenly spaced '
'time sequence from which the sampling period '
'will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, (u_samples-1)*dt, num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if t.ndim != 1:
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.:
raise ValueError('Time array should be equally spaced and '
'increasing.')
if isdiscrete and not np.isclose(dt, t_diff[0]):
raise ValueError('Time array increment {} is not equal to the'
' model sampling period {}.'.format(t_diff[0],
dt))
if u.size < 1:
raise ValueError('The input array should at least have one point.')
# First dimension is always # of samples
if len(u) != len(t):
raise ValueError('The input and time arrays should have the same'
' length. t: {} vs. u: {}'.format(t.shape,
u.shape))
    if u.shape[1] != m:
        raise ValueError('Number of input columns ({}) doesn\'t match the'
                         ' number of inputs ({}) of the given model.'
                         ''.format(u.shape[1], m))
return t
def _check_custom_time_input(t):
"""
Helper function for simple and rather expensive checks for sanity
"""
t = atleast_1d(t)
if t.ndim > 1:
t = squeeze(t)
if t.ndim > 1:
raise ValueError('Time array should be a 1D array but has '
'{} nontrivial dimensions'.format(t.ndim))
if t.size < 2:
raise ValueError('Time array should have at least two data points.')
dt = t[1] - t[0]
    if dt <= 0.:
        raise ValueError('The time increment dt must be positive; '
                         'difference of the first two samples t1 - t0 = {}'
                         ''.format(dt))
# np.diff is somewhat slower than the diff of the views
if not np.allclose(t[1:] - t[:-1], dt):
raise ValueError('Supplied time array is not numerically equally '
'spaced (checked via numpy.allclose).')
return t, dt
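# A minimal sketch of a grid that passes the checks above: np.linspace
# produces the equally spaced, increasing sequence that
# _check_custom_time_input verifies, and the inferred dt is returned.
def _example_valid_time_grid(tfinal=5.0, n=501):
    t = np.linspace(0., tfinal, n)
    return _check_custom_time_input(t)  # -> (t, dt)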
|
XLRDParser | Parses old excel file into tableData object. Only first sheet.
Don't use this directly; use
td=TableData('xls', infile)
td=TableData.load_table(infile)
instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff | import os
'''
TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually keeps
information on a round trip between those formats identical.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is addressed by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begin with 0, so the first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two-dimensional list of lists), so max. size depends on available memory (RAM).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xls format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values.
'''
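# A minimal ingest-and-inspect sketch of the API described above;
# 'example.xls' and 'needle' are placeholder values. Cells are addressed as
# cid|rid with row 0 holding the column names.
def _example_ingest_and_inspect():
    td = TableData.load_table('example.xls')  # extension-aware ingest
    print(td.ncols(), td.nrows())
    print(td.cell(0, 1))                      # first data cell of column 0
    return td.search('needle')                # list of (cid, rid) tuples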
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
# MASKED: XLRDParser function (lines 96-148)
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
c=0
cnames=[]
col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
try:
return self.table[row][col]
except:
            self.verbose ('%i|%i does not exist' % (col, row))
exit (1)
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
try:
self.table[0].index(cname)
return True
except:
return False
def search (self, needle):
'''
Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure!
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
def search_col (self, cname, needle):
'''
        Returns a list of rows (rids) that contain the needle in the given col.
        td.search_col(cname, needle)
        '''
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if str(needle) in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
'''
Add a new column called name at the end of the row.
        Cells will be empty.
Returns the cid of the new column, same as cindex(cname).
'''
#update
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
    def clean_whitespace (self,cname):
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ') # collapse double spaces
##
## MORE COMPLEX MANIPULATION
##
def delCellAIfColBEq (self,cnameA, cnameB, needle):
'''
empty cell in column cnameA if value in column cnameB equals needle in every row
untested
'''
colA=self.cindex(cnameA)
colB=self.cindex(cnameB)
for rid in range(1, self.nrows()):
if self.table[rid][colB] == needle:
self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete row if the given column contains the value 'needle'
        Should we use cname or c (colId)?
        '''
        #can't loop thru rows and delete one during the loop, so walk backwards
        col=self.cindex(cname)
        r=self.nrows()-1 # rows are 0-indexed, so the last rid is nrows()-1
        while r > 0: # row 0 holds the column names
            cell=self.cell (col, r) # cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i'% (needle, r))
                self.delRow(r)
            r -=1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default Value: if cell is empty replace with default value
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
ET.SubElement(doc, self.table[0][c]).text=escape(str(cell))
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
            json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
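# A minimal transform-and-export sketch; file and column names are
# placeholders. write() dispatches on the output file extension.
def _example_transform_and_export():
    td = TableData('csv', 'in.csv', verbose=1)
    td.delRowIfColContains('status', 'obsolete')
    td.addCol('checked')
    td.default_per_col('checked', 'no')
    td.write('out.json')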
| def XLRDParser (self, infile):
'''
Parses old excel file into tableData object. Only first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells?
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory | 96 | 148 | import os
'''
TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually keeps
information on a round trip between those formats identical.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is addressed by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begin with 0, so the first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two-dimensional list of lists), so max. size depends on available memory (RAM).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xls format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values.
'''
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
'''
Parses old excel file into tableData object. Only first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells?
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
c=0
cnames=[]
col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
try:
return self.table[row][col]
except:
            self.verbose ('%i|%i does not exist' % (col, row))
exit (1)
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
try:
self.table[0].index(cname)
return True
except:
return False
def search (self, needle):
'''
Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure!
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
def search_col (self, cname, needle):
'''
        Returns a list of rows (rids) that contain the needle in the given col.
        td.search_col(cname, needle)
        '''
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if str(needle) in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
'''
Add a new column called name at the end of the row.
        Cells will be empty.
Returns the cid of the new column, same as cindex(cname).
'''
#update
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
    def clean_whitespace (self,cname):
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ') # collapse double spaces
##
## MORE COMPLEX MANIPULATION
##
def delCellAIfColBEq (self,cnameA, cnameB, needle):
'''
empty cell in column cnameA if value in column cnameB equals needle in every row
untested
'''
colA=self.cindex(cnameA)
colB=self.cindex(cnameB)
for rid in range(1, self.nrows()):
if self.table[rid][colB] == needle:
self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete row if the given column contains the value 'needle'
        Should we use cname or c (colId)?
        '''
        #can't loop thru rows and delete one during the loop, so walk backwards
        col=self.cindex(cname)
        r=self.nrows()-1 # rows are 0-indexed, so the last rid is nrows()-1
        while r > 0: # row 0 holds the column names
            cell=self.cell (col, r) # cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i'% (needle, r))
                self.delRow(r)
            r -=1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default Value: if cell is empty replace with default value
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
ET.SubElement(doc, self.table[0][c]).text=escape(str(cell))
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
            json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
|
search | Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure! | import os
'''
TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually keeps
information on a round trip between those formats identical.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is addressed by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begin with 0, so the first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two-dimensional list of lists), so max. size depends on available memory (RAM).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xls format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values.
'''
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
'''
Parses old excel file into tableData object. Only first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells?
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
c=0
cnames=[]
col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
try:
return self.table[row][col]
except:
            self.verbose ('%i|%i does not exist' % (col, row))
exit (1)
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
try:
self.table[0].index(cname)
return True
except:
return False
# MASKED: search function (lines 241-257)
def search_col (self, cname, needle):
'''
        Returns a list of rows (rids) that contain the needle in the given col.
        td.search_col(cname, needle)
        '''
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if str(needle) in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
'''
Add a new column called name at the end of the row.
        Cells will be empty.
Returns the cid of the new column, same as cindex(cname).
'''
#update
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
    def clean_whitespace (self,cname):
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ') # collapse double spaces
##
## MORE COMPLEX MANIPULATION
##
def delCellAIfColBEq (self,cnameA, cnameB, needle):
'''
empty cell in column cnameA if value in column cnameB equals needle in every row
untested
'''
colA=self.cindex(cnameA)
colB=self.cindex(cnameB)
for rid in range(1, self.nrows()):
if self.table[rid][colB] == needle:
self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete row if the given column contains the value 'needle'
        Should we use cname or c (colId)?
        '''
        #can't loop thru rows and delete one during the loop, so walk backwards
        col=self.cindex(cname)
        r=self.nrows()-1 # rows are 0-indexed, so the last rid is nrows()-1
        while r > 0: # row 0 holds the column names
            cell=self.cell (col, r) # cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i'% (needle, r))
                self.delRow(r)
            r -=1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default Value: if cell is empty replace with default value
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
ET.SubElement(doc, self.table[0][c]).text=escape(str(cell))
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
            json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
| def search (self, needle):
'''
Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure!
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results | 241 | 257 | import os
'''
TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually keeps
information on a round trip between those formats identical.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is addressed by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begin with 0, so the first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two-dimensional list of lists), so max. size depends on available memory (RAM).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xls format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values.
'''
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
'''
Parses old excel file into tableData object. Only first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells?
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
c=0
cnames=[]
col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
try:
return self.table[row][col]
except:
            self.verbose ('%i|%i does not exist' % (col, row))
exit (1)
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
try:
self.table[0].index(cname)
return True
except:
return False
def search (self, needle):
'''
Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure!
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
def search_col (self, cname, needle):
'''
        Returns a list of rows (rids) that contain the needle in the given col.
        td.search_col(cname, needle)
        '''
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if str(needle) in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
'''
Add a new column called name at the end of the row.
        Cells will be empty.
Returns the cid of the new column, same as cindex(cname).
'''
#update
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
    def clean_whitespace (self,cname):
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ') # collapse double spaces
##
## MORE COMPLEX MANIPULATION
##
def delCellAIfColBEq (self,cnameA, cnameB, needle):
'''
empty cell in column cnameA if value in column cnameB equals needle in every row
untested
'''
colA=self.cindex(cnameA)
colB=self.cindex(cnameB)
for rid in range(1, self.nrows()):
if self.table[rid][colB] == needle:
self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete row if the given column contains the value 'needle'
        Should we use cname or c (colId)?
        '''
        #can't loop thru rows and delete one during the loop, so walk backwards
        col=self.cindex(cname)
        r=self.nrows()-1 # rows are 0-indexed, so the last rid is nrows()-1
        while r > 0: # row 0 holds the column names
            cell=self.cell (col, r) # cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i'% (needle, r))
                self.delRow(r)
            r -=1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default Value: if cell is empty replace with default value
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
ET.SubElement(doc, self.table[0][c]).text=escape(str(cell))
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
            json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
|
addCol | Add a new column called name at the end of the row.
Cells will be empty.
Returns the cid of the new column, same as cindex(cname). | import os
'''
TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually keeps
information on a round trip between those formats identical.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is addressed by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begin with 0, so the first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two-dimensional list of lists), so max. size depends on available memory (RAM).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xls format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values.
'''
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
'''
        Parses an old Excel (xls) file into a TableData object. Only the first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells?
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
            cnames=[]
            col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
        try:
            return self.table[row][col]
        except IndexError:
            self.verbose ("%i|%i doesn't exist" % (col, row))
            raise
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
        return cname in self.table[0]
def search (self, needle):
'''
        Returns a list of (cid,rid) tuples for cells that contain the needle.
        r=td.search(needle) # [(1,1), ...]
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
    def search_col (self, cname, needle):
        '''
        Returns a list of rids whose cell in the given column contains the needle.
        td.search_col(cname, needle)
        '''
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if str(needle) in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
# MASKED: addCol function (lines 308-320)
    def clean_whitespace (self,cname):
        '''
        Replace newlines and double spaces with single spaces in column cname.
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ')
##
## MORE COMPLEX MANIPULATION
##
    def delCellAIfColBEq (self,cnameA, cnameB, needle):
        '''
        Empty the cell in column cnameA wherever the value in column cnameB equals needle.
        (Untested.)
        '''
        colA=self.cindex(cnameA)
        colB=self.cindex(cnameB)
        for rid in range(1, self.nrows()):
            if self.table[rid][colB] == needle:
                self.verbose ('delCellAIfColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete a row if its cell in column cname contains the value 'needle'.
        Should we use cname or c (colId)?
        '''
        #can't loop thru rows and delete during the loop, so walk backwards from the last row
        col=self.cindex(cname)
        r=self.nrows()-1
        while r > 0: # row 0 holds the cnames
            cell=self.cell (col, r) # cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i' % (needle, r))
                self.delRow(r)
            r -= 1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default value: if a cell is empty, replace it with default_value.
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
root = ET.Element("tdx") #table data xml
self._outTest(out)
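        # _indent is the well-known ElementTree pretty-print recipe: it recursively sets
        # .text/.tail whitespace so nested elements serialize one per line, indented.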
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
                    ET.SubElement(doc, self.table[0][c]).text = str(cell) # no manual escaping; ElementTree escapes <, > and & itself on write
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
            json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
| def addCol (self,name):
'''
        Add a new column called name at the end of each row.
        Cells will be empty.
Returns the cid of the new column, same as cindex(cname).
'''
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0 | 308 | 320 | import os
'''
TableData deals with data that comes from MS Excel, csv or xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually keeps
information identical on a round trip between those formats.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is represented by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begin with 0, so the first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two dimensional list of lists), so max. size depends on available memory (ram).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xls format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows omit elements whose values are empty.
'''
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
'''
        Parses an old Excel (xls) file into a TableData object. Only the first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells?
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
            cnames=[]
            col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
        try:
            return self.table[row][col]
        except IndexError:
            self.verbose ("%i|%i doesn't exist" % (col, row))
            raise
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
        return cname in self.table[0]
def search (self, needle):
'''
        Returns a list of (cid,rid) tuples for cells that contain the needle.
        r=td.search(needle) # [(1,1), ...]
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
    def search_col (self, cname, needle):
        '''
        Returns a list of rids whose cell in the given column contains the needle.
        td.search_col(cname, needle)
        '''
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if str(needle) in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
'''
        Add a new column called name at the end of each row.
        Cells will be empty.
Returns the cid of the new column, same as cindex(cname).
'''
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
    def clean_whitespace (self,cname):
        '''
        Replace newlines and double spaces with single spaces in column cname.
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ')
##
## MORE COMPLEX MANIPULATION
##
    def delCellAIfColBEq (self,cnameA, cnameB, needle):
        '''
        Empty the cell in column cnameA wherever the value in column cnameB equals needle.
        (Untested.)
        '''
        colA=self.cindex(cnameA)
        colB=self.cindex(cnameB)
        for rid in range(1, self.nrows()):
            if self.table[rid][colB] == needle:
                self.verbose ('delCellAIfColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete a row if its cell in column cname contains the value 'needle'.
        Should we use cname or c (colId)?
        '''
        #can't loop thru rows and delete during the loop, so walk backwards from the last row
        col=self.cindex(cname)
        r=self.nrows()-1
        while r > 0: # row 0 holds the cnames
            cell=self.cell (col, r) # cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i' % (needle, r))
                self.delRow(r)
            r -= 1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default value: if a cell is empty, replace it with default_value.
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
                    ET.SubElement(doc, self.table[0][c]).text = str(cell) # no manual escaping; ElementTree escapes <, > and & itself on write
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
            json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
|
get_markdown_text_for_pub | Gets a dictionary `pub`, returns a markdown formatted text.
An example pub:
{'authors': 'McLellan, S. L., and Eren, A. M.',
'doi': '10.1016/j.tim.2014.08.002',
'issue': '22(12), 697-706',
'title': 'Discovering new indicators of fecal pollution.',
'journal': 'Trends Microbiol',
'year': 2014} | # -*- coding: utf-8 -*-
# an ugly hack to convert some stuff into other stuff...
# EDIT THESE #####################################################################
names_to_highlight = ['Eren AM',
'Delmont TO',
'Esen ÖC',
'Lee STM',
'Shaiber A',
'Kiefl E',
'Cui S',
'Watson AR',
'Lolans K']
journal_name_fixes = [('The ISME journal', 'ISME J'),
('Proceedings of the National Academy of Sciences of the United States of America', 'Proc Natl Acad Sci U S A'),
('Proceedings of the National Academy of Sciences', 'Proc Natl Acad Sci U S A'),
('Frontiers in Microbiology', 'Front Microbiol')]
keep_pubs_after_year = 2009
##################################################################################
import os
import sys
from datetime import datetime
try:
import anvio.utils as u
from anvio.errors import ConfigError
except ImportError:
sys.stderr.write("This program requires anvi'o to be installed :/\n")
sys.exit(-1)
class Publications:
def __init__(self, pubs_file_path='pubs.txt', pubs_info_file_path='pubs_info.txt'):
"""Takes an EndNote library exported a TXT file (`pubs_file_path`), and an optional\
TAB-delimited info file path with DOI identifiers (`pubs_info_file_path`), and\
generates some Markdown formatted output.
Here is an info line from the EndNote:
Winterberg, K. M., and Reznikoff, W. S. (2007). "Screening transposon mutant libraries using full-genome oligonucleotide microarrays." Methods Enzymol, 421, 110-25.
Absolute matching to this format is required.
Expected headers in the TAB-delimited pubs info file are 'doi', 'highlights',\
and 'featured_image'.
- doi: The DOI of the pub matching to a pubs file path entry.
        - highlights: Brief bullet points about the work. Each point must be separated\
from the rest with a ';' character. HTML tags are OK.
- featured_image: A URL to an image.
If things are not working, feel free to write to meren at uchicago.edu
"""
self.info = {}
self.pubs_dict = {}
self.journals_list = []
self.authors_list = []
self.recent_authors_list = []
self.author_links = {}
self.pubs_file_path = pubs_file_path
self.pubs_info_file_path = pubs_info_file_path
def get_author_highlights(self, pub):
authors_str = []
for author in pub['authors']:
if author in pub['co_first_authors']:
author_h = author + '<sup>☯</sup>'
elif author in pub['co_senior_authors']:
author_h = author + '<sup>‡</sup>'
else:
author_h = author
if author in names_to_highlight:
authors_str.append('<span class="pub-member-author">%s</span>' % (author_h))
else:
authors_str.append(author_h)
return ', '.join(authors_str)
def parse_pubs_txt(self):
if os.path.exists(self.pubs_info_file_path):
self.info = u.get_TAB_delimited_file_as_dictionary(self.pubs_info_file_path)
pubs_header = u.get_columns_of_TAB_delim_file(self.pubs_file_path, include_first_column=True)
headers_expected = ['Authors', 'Title', 'Publication', 'Volume', 'Number', 'Pages', 'Year', 'doi']
        missing_headers = [h for h in headers_expected if h not in pubs_header]
        if len(missing_headers):
            raise ConfigError("Sorry, the pubs.txt seems to be missing some of the headers that are mandatory. Each of \
                               the columns in the following list must be present in this file: %s (hint: yours is missing \
                               the following: %s)." % (', '.join(headers_expected), ', '.join(missing_headers)))
self.pubs_txt = u.get_TAB_delimited_file_as_dictionary(self.pubs_file_path, indexing_field=pubs_header.index('doi'))
for doi in self.pubs_txt:
authors = []
co_first_authors = []
co_senior_authors = []
p = self.pubs_txt[doi]
for author in [_.strip() for _ in p['Authors'].split(';')]:
if not len(author):
continue
author_last_name, author_first_name_raw = [_.strip() for _ in author.split(',')]
author_first_name = ''.join([n[0] for n in author_first_name_raw.split()])
author_final_name = '%s %s' % (author_last_name, author_first_name)
if author_first_name_raw.endswith('*'):
co_first_authors.append(author_final_name)
elif author_first_name_raw.endswith('+'):
co_senior_authors.append(author_final_name)
authors.append(author_final_name)
if p['Number']:
issue = '%s(%s):%s' % (p['Volume'], p['Number'], p['Pages'])
else:
issue = '%s:%s' % (p['Volume'], p['Pages'])
year = p['Year'].strip()
pub_entry = {'authors': authors, 'title': p['Title'], 'journal': p['Publication'], 'issue': issue, 'doi': doi, 'year': year, 'co_first_authors': co_first_authors, 'co_senior_authors': co_senior_authors}
if year not in self.pubs_dict:
self.pubs_dict[year] = [pub_entry]
else:
self.pubs_dict[year].append(pub_entry)
# MASKED: get_markdown_text_for_pub function (lines 138-191)
def store_markdown_output_for_pubs(self, output_file_path):
# years = ''.join(['<a href="#%s"><span class="category-item">%s <small>(%d)</small></span></a>' % (y, y, len(self.pubs_dict[y])) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
years = ''.join(['<a href="#%s"><span class="category-item">%s</span></a>' % (y, y) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
output_file = open(output_file_path, 'w')
W = lambda s: output_file.write(s + '\n')
W('---')
W('layout: publications')
W('modified: %s' % datetime.today().strftime('%Y-%m-%d'))
W('comments: false')
W('---\n')
W('''<script type='text/javascript' src='https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js'></script>\n''')
W('''<script async src="https://badge.dimensions.ai/badge.js" charset="utf-8"></script>\n''')
W('<div class="category-box">\n%s\n</div>\n' % years)
W('{:.notice}\n')
W("This page shows publications that are most reflective of our interests. For a complete list, please see <a href='https://scholar.google.com/citations?user=GtLLuxoAAAAJ&view_op=list_works&sortby=pubdate' target='_blank'>Meren's Google Scholar page</a>.\n")
for year in sorted(list(self.pubs_dict.keys()), reverse=True):
W('<a name="%s"> </a>' % year)
W('<h1>%s</h1>\n' % year)
for pub in self.pubs_dict[year]:
W(self.get_markdown_text_for_pub(pub))
                W('')
        output_file.close()
if __name__ == '__main__':
pubs = Publications()
try:
pubs.parse_pubs_txt()
pubs.store_markdown_output_for_pubs('publications/index.md')
except ConfigError as e:
print(e)
sys.exit(-1) | def get_markdown_text_for_pub(self, pub):
"""Gets a dictionary `pub`, returns a markdown formatted text.
An example pub:
{'authors': 'McLellan, S. L., and Eren, A. M.',
'doi': '10.1016/j.tim.2014.08.002',
'issue': '22(12), 697-706',
'title': 'Discovering new indicators of fecal pollution.',
'journal': 'Trends Microbiol',
'year': 2014}
"""
pub_md = []
A = lambda s: pub_md.append(s)
A('<div class="pub">')
A('''<div class='altmetric-embed' data-badge-type='donut' data-doi="%s"></div>''' % pub['doi'])
A('''<div class="__dimensions_badge_embed__" data-doi="%s" data-hide-zero-citations="true" data-legend="hover-bottom" data-style="small_circle"></div>''' % pub['doi'])
if pub['doi']:
A(' <h3><a href="%s" target="_new">%s</a></h3>' % (' https://doi.org/%s' % (pub['doi']), pub['title']))
else:
A(' <h3><a href="http://scholar.google.com/scholar?hl=en&q=%s" target="_new">%s</a></h3>' % ('http://scholar.google.com/scholar?hl=en&q=%s' % (pub['title'].replace(' ', '+')), pub['title']))
A(' <span class="pub-authors">%s</span>' % self.get_author_highlights(pub))
if pub['co_first_authors'] and not pub['co_senior_authors']:
A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors</span>')
elif pub['co_first_authors'] and pub['co_senior_authors']:
A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors; <sup>‡</sup>Co-senior authors</span>')
elif pub['co_senior_authors'] and not pub['co_first_authors']:
A(' <span class="pub-co-first-authors"><sup>‡</sup>Co-senior authors</span>')
if pub['doi'] in self.info:
info = self.info[pub['doi']]
A(' <div class="%s">' % ('pub-info' if info['featured_image'] else 'pub-info-no-image'))
if info['featured_image']:
A(' <div class="pub-featured-image">')
A(' <a href="%s"><img src="%s" style="max-width: 100px; max-height: 80px; width: auto; border: none; height: auto; margin: 0 auto; display: block; transform: translateY(15%%);"/></a>' % (info['featured_image'], info['featured_image']))
A(' </div>')
highlights = info['highlights'].split(';') if info['highlights'] else None
if highlights:
A(' <div class="%s">' % ('pub-highlights' if info['featured_image'] else 'pub-highlights-no-image'))
A(' %s' % '<br>'.join(['<span style="display: inline-block; padding-bottom: 5px;">- %s</span>' % h for h in highlights]))
A(' </div>')
A(' </div>')
A(' <span class="pub-journal"><b>%s</b>, %s.</span>' % (pub['journal'], pub['issue']))
A('</div>\n')
return '\n'.join(pub_md) | 138 | 191 | # -*- coding: utf-8 -*-
# an ugly hack to convert some stuff into other stuff...
# EDIT THESE #####################################################################
names_to_highlight = ['Eren AM',
'Delmont TO',
'Esen ÖC',
'Lee STM',
'Shaiber A',
'Kiefl E',
'Cui S',
'Watson AR',
'Lolans K']
journal_name_fixes = [('The ISME journal', 'ISME J'),
('Proceedings of the National Academy of Sciences of the United States of America', 'Proc Natl Acad Sci U S A'),
('Proceedings of the National Academy of Sciences', 'Proc Natl Acad Sci U S A'),
('Frontiers in Microbiology', 'Front Microbiol')]
keep_pubs_after_year = 2009
##################################################################################
import os
import sys
from datetime import datetime
try:
import anvio.utils as u
from anvio.errors import ConfigError
except ImportError:
sys.stderr.write("This program requires anvi'o to be installed :/\n")
sys.exit(-1)
class Publications:
def __init__(self, pubs_file_path='pubs.txt', pubs_info_file_path='pubs_info.txt'):
"""Takes an EndNote library exported a TXT file (`pubs_file_path`), and an optional\
TAB-delimited info file path with DOI identifiers (`pubs_info_file_path`), and\
generates some Markdown formatted output.
Here is an info line from the EndNote:
Winterberg, K. M., and Reznikoff, W. S. (2007). "Screening transposon mutant libraries using full-genome oligonucleotide microarrays." Methods Enzymol, 421, 110-25.
Absolute matching to this format is required.
Expected headers in the TAB-delimited pubs info file are 'doi', 'highlights',\
and 'featured_image'.
- doi: The DOI of the pub matching to a pubs file path entry.
        - highlights: Brief bullet points about the work. Each point must be separated\
from the rest with a ';' character. HTML tags are OK.
- featured_image: A URL to an image.
If things are not working, feel free to write to meren at uchicago.edu
"""
self.info = {}
self.pubs_dict = {}
self.journals_list = []
self.authors_list = []
self.recent_authors_list = []
self.author_links = {}
self.pubs_file_path = pubs_file_path
self.pubs_info_file_path = pubs_info_file_path
def get_author_highlights(self, pub):
authors_str = []
for author in pub['authors']:
if author in pub['co_first_authors']:
author_h = author + '<sup>☯</sup>'
elif author in pub['co_senior_authors']:
author_h = author + '<sup>‡</sup>'
else:
author_h = author
if author in names_to_highlight:
authors_str.append('<span class="pub-member-author">%s</span>' % (author_h))
else:
authors_str.append(author_h)
return ', '.join(authors_str)
def parse_pubs_txt(self):
if os.path.exists(self.pubs_info_file_path):
self.info = u.get_TAB_delimited_file_as_dictionary(self.pubs_info_file_path)
pubs_header = u.get_columns_of_TAB_delim_file(self.pubs_file_path, include_first_column=True)
headers_expected = ['Authors', 'Title', 'Publication', 'Volume', 'Number', 'Pages', 'Year', 'doi']
        missing_headers = [h for h in headers_expected if h not in pubs_header]
        if len(missing_headers):
            raise ConfigError("Sorry, the pubs.txt seems to be missing some of the headers that are mandatory. Each of \
                               the columns in the following list must be present in this file: %s (hint: yours is missing \
                               the following: %s)." % (', '.join(headers_expected), ', '.join(missing_headers)))
self.pubs_txt = u.get_TAB_delimited_file_as_dictionary(self.pubs_file_path, indexing_field=pubs_header.index('doi'))
for doi in self.pubs_txt:
authors = []
co_first_authors = []
co_senior_authors = []
p = self.pubs_txt[doi]
for author in [_.strip() for _ in p['Authors'].split(';')]:
if not len(author):
continue
author_last_name, author_first_name_raw = [_.strip() for _ in author.split(',')]
author_first_name = ''.join([n[0] for n in author_first_name_raw.split()])
author_final_name = '%s %s' % (author_last_name, author_first_name)
if author_first_name_raw.endswith('*'):
co_first_authors.append(author_final_name)
elif author_first_name_raw.endswith('+'):
co_senior_authors.append(author_final_name)
authors.append(author_final_name)
if p['Number']:
issue = '%s(%s):%s' % (p['Volume'], p['Number'], p['Pages'])
else:
issue = '%s:%s' % (p['Volume'], p['Pages'])
year = p['Year'].strip()
pub_entry = {'authors': authors, 'title': p['Title'], 'journal': p['Publication'], 'issue': issue, 'doi': doi, 'year': year, 'co_first_authors': co_first_authors, 'co_senior_authors': co_senior_authors}
if year not in self.pubs_dict:
self.pubs_dict[year] = [pub_entry]
else:
self.pubs_dict[year].append(pub_entry)
def get_markdown_text_for_pub(self, pub):
"""Gets a dictionary `pub`, returns a markdown formatted text.
An example pub:
{'authors': 'McLellan, S. L., and Eren, A. M.',
'doi': '10.1016/j.tim.2014.08.002',
'issue': '22(12), 697-706',
'title': 'Discovering new indicators of fecal pollution.',
'journal': 'Trends Microbiol',
'year': 2014}
"""
pub_md = []
A = lambda s: pub_md.append(s)
A('<div class="pub">')
A('''<div class='altmetric-embed' data-badge-type='donut' data-doi="%s"></div>''' % pub['doi'])
A('''<div class="__dimensions_badge_embed__" data-doi="%s" data-hide-zero-citations="true" data-legend="hover-bottom" data-style="small_circle"></div>''' % pub['doi'])
if pub['doi']:
A(' <h3><a href="%s" target="_new">%s</a></h3>' % (' https://doi.org/%s' % (pub['doi']), pub['title']))
else:
A(' <h3><a href="http://scholar.google.com/scholar?hl=en&q=%s" target="_new">%s</a></h3>' % ('http://scholar.google.com/scholar?hl=en&q=%s' % (pub['title'].replace(' ', '+')), pub['title']))
A(' <span class="pub-authors">%s</span>' % self.get_author_highlights(pub))
if pub['co_first_authors'] and not pub['co_senior_authors']:
A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors</span>')
elif pub['co_first_authors'] and pub['co_senior_authors']:
A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors; <sup>‡</sup>Co-senior authors</span>')
elif pub['co_senior_authors'] and not pub['co_first_authors']:
A(' <span class="pub-co-first-authors"><sup>‡</sup>Co-senior authors</span>')
if pub['doi'] in self.info:
info = self.info[pub['doi']]
A(' <div class="%s">' % ('pub-info' if info['featured_image'] else 'pub-info-no-image'))
if info['featured_image']:
A(' <div class="pub-featured-image">')
A(' <a href="%s"><img src="%s" style="max-width: 100px; max-height: 80px; width: auto; border: none; height: auto; margin: 0 auto; display: block; transform: translateY(15%%);"/></a>' % (info['featured_image'], info['featured_image']))
A(' </div>')
highlights = info['highlights'].split(';') if info['highlights'] else None
if highlights:
A(' <div class="%s">' % ('pub-highlights' if info['featured_image'] else 'pub-highlights-no-image'))
A(' %s' % '<br>'.join(['<span style="display: inline-block; padding-bottom: 5px;">- %s</span>' % h for h in highlights]))
A(' </div>')
A(' </div>')
A(' <span class="pub-journal"><b>%s</b>, %s.</span>' % (pub['journal'], pub['issue']))
A('</div>\n')
return '\n'.join(pub_md)
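    # The method above renders one <div class="pub"> block per paper; for the example pub in
    # its docstring the output starts roughly like this (abridged sketch):
    #   <div class="pub">
    #   <div class='altmetric-embed' data-badge-type='donut' data-doi="10.1016/j.tim.2014.08.002"></div>
    #   ...
    #   <span class="pub-journal"><b>Trends Microbiol</b>, 22(12), 697-706.</span>
    #   </div>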
def store_markdown_output_for_pubs(self, output_file_path):
# years = ''.join(['<a href="#%s"><span class="category-item">%s <small>(%d)</small></span></a>' % (y, y, len(self.pubs_dict[y])) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
years = ''.join(['<a href="#%s"><span class="category-item">%s</span></a>' % (y, y) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
output_file = open(output_file_path, 'w')
W = lambda s: output_file.write(s + '\n')
W('---')
W('layout: publications')
W('modified: %s' % datetime.today().strftime('%Y-%m-%d'))
W('comments: false')
W('---\n')
W('''<script type='text/javascript' src='https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js'></script>\n''')
W('''<script async src="https://badge.dimensions.ai/badge.js" charset="utf-8"></script>\n''')
W('<div class="category-box">\n%s\n</div>\n' % years)
W('{:.notice}\n')
W("This page shows publications that are most reflective of our interests. For a complete list, please see <a href='https://scholar.google.com/citations?user=GtLLuxoAAAAJ&view_op=list_works&sortby=pubdate' target='_blank'>Meren's Google Scholar page</a>.\n")
for year in sorted(list(self.pubs_dict.keys()), reverse=True):
W('<a name="%s"> </a>' % year)
W('<h1>%s</h1>\n' % year)
for pub in self.pubs_dict[year]:
W(self.get_markdown_text_for_pub(pub))
                W('')
        output_file.close()
if __name__ == '__main__':
pubs = Publications()
try:
pubs.parse_pubs_txt()
pubs.store_markdown_output_for_pubs('publications/index.md')
except ConfigError as e:
print(e)
sys.exit(-1)
|
build_format_file | Creates the non-xml SQL format file. Puts 4 spaces between each section.
See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
for the specification of the file.
# TODO add params/options to control:
# - the char type (not just SQLCHAR),
Parameters
----------
df : pandas DataFrame
delimiter : a valid delimiter character
db_cols_order : dict, optional
Dict of {database column name -> ordinal position of the column}.
Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
1-indexed, so the first column is 1, second is 2, etc.
Only needed if the order of the columns in the dataframe doesn't match the database.
Returns
-------
A string containing the format file | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
sql_item: str,
direction: str,
flat_file: str,
creds,
sql_type: str = "table",
schema: str = "dbo",
format_file_path: str = None,
batch_size: int = None,
col_delimiter: str = None,
row_terminator: str = None,
bcp_path: Union[str, Path] = None,
error_file_path: str = None
):
"""
See https://docs.microsoft.com/en-us/sql/tools/bcp-utility
"""
combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
direc = direction.lower()
# validation
if direc not in DIRECTIONS:
raise BCPandasValueError(
f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
)
if direc not in combos[sql_type]:
raise BCPandasValueError(
f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
)
# auth
if creds.with_krb_auth:
auth = ["-T"]
else:
auth = ["-U", creds.username, "-P", creds.password]
# prepare SQL item string
if sql_type == QUERY:
# remove newlines for queries, otherwise messes up BCP
sql_item_string = quote_this("".join(sql_item.splitlines()))
else:
sql_item_string = f"{schema}.{sql_item}"
# construct BCP command
bcp_command = [
"bcp" if bcp_path is None else quote_this(str(bcp_path)),
sql_item_string,
direc,
flat_file,
"-S",
creds.server,
"-d",
creds.database,
"-q", # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
"-e",
error_file_path
] + auth
if batch_size:
bcp_command += ["-b", str(batch_size)]
# formats
if direc == IN:
bcp_command += ["-f", format_file_path]
elif direc in (OUT, QUERYOUT):
bcp_command += [
"-c", # marking as character data, not Unicode (maybe make as param?)
quote_this(
f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
),
quote_this(
f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
),
]
# execute
bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
ret_code = run_cmd(bcp_command)
if ret_code:
raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
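# For orientation, a command assembled above might look roughly like this (hypothetical
# server/database/paths, Kerberos auth, importing a table with a format file):
#   bcp dbo.my_table in /tmp/data.csv -S myserver -d mydb -q -e /tmp/err.log -T -b 10000 -f /tmp/data.fmt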
def get_temp_file() -> str:
"""
Returns full path to a temporary file without creating it.
"""
tmp_dir = tempfile.gettempdir()
file_path = os.path.join(
tmp_dir, "".join(random.choices(string.ascii_letters + string.digits, k=21))
)
return file_path
def _escape(input_string: str) -> str:
"""
    Adapted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25
"""
return (
input_string.replace('"', '\\"')
.replace("'", "\\'")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
# MASKED: build_format_file function (lines 145-193)
def quote_this(this: str, skip: bool = False) -> str:
"""
OS-safe way to quote a string.
Returns the string with quotes around it.
On Windows ~~it's double quotes~~ we skip quoting,
on Linux it's single quotes.
"""
if isinstance(this, str):
if IS_WIN32:
return this # TODO maybe change?
else:
return shlex.quote(this)
else:
return this
def run_cmd(cmd: List[str]) -> int:
"""
Runs the given command.
Prints STDOUT in real time, prints STDERR when command is complete,
and logs both STDOUT and STDERR.
    Parameters
    ----------
cmd : list of str
The command to run, to be submitted to `subprocess.Popen()`
Returns
-------
The exit code of the command
"""
if IS_WIN32:
with_shell = False
else:
with_shell = True
cmd = " ".join(cmd) # type: ignore
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, encoding="utf-8", errors="utf-8", shell=with_shell,)
# live stream STDOUT
while True:
outs = proc.stdout.readline()
if outs:
print(outs, end="")
logger.info(outs)
if proc.poll() is not None and outs == "":
break
    errs = "".join(proc.stderr.readlines())
    if errs:
        print(errs, end="")
        logger.error(errs)
return proc.returncode | def build_format_file(
df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
"""
Creates the non-xml SQL format file. Puts 4 spaces between each section.
See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
for the specification of the file.
# TODO add params/options to control:
# - the char type (not just SQLCHAR),
Parameters
----------
df : pandas DataFrame
delimiter : a valid delimiter character
db_cols_order : dict, optional
Dict of {database column name -> ordinal position of the column}.
Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
        1-indexed, so the first column is 1, second is 2, etc.
Only needed if the order of the columns in the dataframe doesn't match the database.
Returns
-------
A string containing the format file
"""
_space = " " * 4
format_file_str = f"9.0\n{len(df.columns)}\n" # Version and Number of columns
for col_num, col_name in enumerate(df.columns, start=1):
# last col gets a newline sep
_delim = delimiter if col_num != len(df.columns) else NEWLINE
_line = _space.join(
[
str(col_num), # Host file field order
SQLCHAR, # Host file data type
str(0), # Prefix length
str(0), # Host file data length
f'"{_escape(_delim)}"', # Terminator (see note below)
str(
col_num if not db_cols_order else db_cols_order[str(col_name)]
), # Server column order
str(col_name), # Server column name, optional as long as not blank
sql_collation, # Column collation
"\n",
]
)
format_file_str += _line
# FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
# "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
return format_file_str | 145 | 193 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
sql_item: str,
direction: str,
flat_file: str,
creds,
sql_type: str = "table",
schema: str = "dbo",
format_file_path: str = None,
batch_size: int = None,
col_delimiter: str = None,
row_terminator: str = None,
bcp_path: Union[str, Path] = None,
error_file_path: str = None
):
"""
See https://docs.microsoft.com/en-us/sql/tools/bcp-utility
"""
combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
direc = direction.lower()
# validation
if direc not in DIRECTIONS:
raise BCPandasValueError(
f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
)
if direc not in combos[sql_type]:
raise BCPandasValueError(
f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
)
# auth
if creds.with_krb_auth:
auth = ["-T"]
else:
auth = ["-U", creds.username, "-P", creds.password]
# prepare SQL item string
if sql_type == QUERY:
# remove newlines for queries, otherwise messes up BCP
sql_item_string = quote_this("".join(sql_item.splitlines()))
else:
sql_item_string = f"{schema}.{sql_item}"
# construct BCP command
bcp_command = [
"bcp" if bcp_path is None else quote_this(str(bcp_path)),
sql_item_string,
direc,
flat_file,
"-S",
creds.server,
"-d",
creds.database,
"-q", # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
"-e",
error_file_path
] + auth
if batch_size:
bcp_command += ["-b", str(batch_size)]
# formats
if direc == IN:
bcp_command += ["-f", format_file_path]
elif direc in (OUT, QUERYOUT):
bcp_command += [
"-c", # marking as character data, not Unicode (maybe make as param?)
quote_this(
f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
),
quote_this(
f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
),
]
# execute
bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
ret_code = run_cmd(bcp_command)
if ret_code:
raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
"""
Returns full path to a temporary file without creating it.
"""
tmp_dir = tempfile.gettempdir()
file_path = os.path.join(
tmp_dir, "".join(random.choices(string.ascii_letters + string.digits, k=21))
)
return file_path
def _escape(input_string: str) -> str:
"""
    Adapted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25
"""
return (
input_string.replace('"', '\\"')
.replace("'", "\\'")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
def build_format_file(
df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
"""
Creates the non-xml SQL format file. Puts 4 spaces between each section.
See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
for the specification of the file.
# TODO add params/options to control:
# - the char type (not just SQLCHAR),
Parameters
----------
df : pandas DataFrame
delimiter : a valid delimiter character
db_cols_order : dict, optional
Dict of {database column name -> ordinal position of the column}.
Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
        1-indexed, so the first column is 1, second is 2, etc.
Only needed if the order of the columns in the dataframe doesn't match the database.
Returns
-------
A string containing the format file
"""
_space = " " * 4
format_file_str = f"9.0\n{len(df.columns)}\n" # Version and Number of columns
for col_num, col_name in enumerate(df.columns, start=1):
# last col gets a newline sep
_delim = delimiter if col_num != len(df.columns) else NEWLINE
_line = _space.join(
[
str(col_num), # Host file field order
SQLCHAR, # Host file data type
str(0), # Prefix length
str(0), # Host file data length
f'"{_escape(_delim)}"', # Terminator (see note below)
str(
col_num if not db_cols_order else db_cols_order[str(col_name)]
), # Server column order
str(col_name), # Server column name, optional as long as not blank
sql_collation, # Column collation
"\n",
]
)
format_file_str += _line
# FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
# "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
return format_file_str
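# For a two-column dataframe and delimiter ',', build_format_file returns something like the
# sketch below (illustrative only; the row terminator and collation come from .constants):
#   9.0
#   2
#   1    SQLCHAR    0    0    ","            1    col_a    <sql_collation>
#   2    SQLCHAR    0    0    "<NEWLINE>"    2    col_b    <sql_collation>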
def quote_this(this: str, skip: bool = False) -> str:
"""
OS-safe way to quote a string.
Returns the string with quotes around it.
On Windows ~~it's double quotes~~ we skip quoting,
on Linux it's single quotes.
"""
if isinstance(this, str):
if IS_WIN32:
return this # TODO maybe change?
else:
return shlex.quote(this)
else:
return this
def run_cmd(cmd: List[str]) -> int:
"""
Runs the given command.
Prints STDOUT in real time, prints STDERR when command is complete,
and logs both STDOUT and STDERR.
    Parameters
    ----------
cmd : list of str
The command to run, to be submitted to `subprocess.Popen()`
Returns
-------
The exit code of the command
"""
if IS_WIN32:
with_shell = False
else:
with_shell = True
cmd = " ".join(cmd) # type: ignore
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, encoding="utf-8", errors="utf-8", shell=with_shell,)
# live stream STDOUT
while True:
outs = proc.stdout.readline()
if outs:
print(outs, end="")
logger.info(outs)
if proc.poll() is not None and outs == "":
break
errs = proc.stderr.readlines()
if errs:
print(errs, end="")
logger.error(errs)
return proc.returncode
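# Hypothetical usage sketch: run a command, stream its output, and check the exit code.
#   if run_cmd(["bcp", "-v"]):  # assumes the bcp utility is on PATH
#       raise BCPandasException("bcp utility not available")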
|
run_cmd | Runs the given command.
Prints STDOUT in real time, prints STDERR when command is complete,
and logs both STDOUT and STDERR.
Parameters
----------
cmd : list of str
The command to run, to be submitted to `subprocess.Popen()`
Returns
-------
The exit code of the command | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
sql_item: str,
direction: str,
flat_file: str,
creds,
sql_type: str = "table",
schema: str = "dbo",
format_file_path: str = None,
batch_size: int = None,
col_delimiter: str = None,
row_terminator: str = None,
bcp_path: Union[str, Path] = None,
error_file_path: str = None
):
"""
See https://docs.microsoft.com/en-us/sql/tools/bcp-utility
"""
combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
direc = direction.lower()
# validation
if direc not in DIRECTIONS:
raise BCPandasValueError(
f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
)
if direc not in combos[sql_type]:
raise BCPandasValueError(
f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
)
# auth
if creds.with_krb_auth:
auth = ["-T"]
else:
auth = ["-U", creds.username, "-P", creds.password]
# prepare SQL item string
if sql_type == QUERY:
# remove newlines for queries, otherwise messes up BCP
sql_item_string = quote_this("".join(sql_item.splitlines()))
else:
sql_item_string = f"{schema}.{sql_item}"
# construct BCP command
bcp_command = [
"bcp" if bcp_path is None else quote_this(str(bcp_path)),
sql_item_string,
direc,
flat_file,
"-S",
creds.server,
"-d",
creds.database,
"-q", # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
"-e",
error_file_path
] + auth
if batch_size:
bcp_command += ["-b", str(batch_size)]
# formats
if direc == IN:
bcp_command += ["-f", format_file_path]
elif direc in (OUT, QUERYOUT):
bcp_command += [
"-c", # marking as character data, not Unicode (maybe make as param?)
quote_this(
f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
),
quote_this(
f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
),
]
# execute
bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
ret_code = run_cmd(bcp_command)
if ret_code:
raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
"""
Returns full path to a temporary file without creating it.
"""
tmp_dir = tempfile.gettempdir()
file_path = os.path.join(
tmp_dir, "".join(random.choices(string.ascii_letters + string.digits, k=21))
)
return file_path
def _escape(input_string: str) -> str:
"""
    Adapted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25
"""
return (
input_string.replace('"', '\\"')
.replace("'", "\\'")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
def build_format_file(
df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
"""
Creates the non-xml SQL format file. Puts 4 spaces between each section.
See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
for the specification of the file.
# TODO add params/options to control:
# - the char type (not just SQLCHAR),
Parameters
----------
df : pandas DataFrame
delimiter : a valid delimiter character
db_cols_order : dict, optional
Dict of {database column name -> ordinal position of the column}.
Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
        1-indexed, so the first column is 1, second is 2, etc.
Only needed if the order of the columns in the dataframe doesn't match the database.
Returns
-------
A string containing the format file
"""
_space = " " * 4
format_file_str = f"9.0\n{len(df.columns)}\n" # Version and Number of columns
for col_num, col_name in enumerate(df.columns, start=1):
# last col gets a newline sep
_delim = delimiter if col_num != len(df.columns) else NEWLINE
_line = _space.join(
[
str(col_num), # Host file field order
SQLCHAR, # Host file data type
str(0), # Prefix length
str(0), # Host file data length
f'"{_escape(_delim)}"', # Terminator (see note below)
str(
col_num if not db_cols_order else db_cols_order[str(col_name)]
), # Server column order
str(col_name), # Server column name, optional as long as not blank
sql_collation, # Column collation
"\n",
]
)
format_file_str += _line
# FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
# "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
return format_file_str
def quote_this(this: str, skip: bool = False) -> str:
"""
OS-safe way to quote a string.
Returns the string with quotes around it.
On Windows ~~it's double quotes~~ we skip quoting,
on Linux it's single quotes.
"""
if isinstance(this, str):
if IS_WIN32:
return this # TODO maybe change?
else:
return shlex.quote(this)
else:
return this
# MASKED: run_cmd function (lines 213-247) | def run_cmd(cmd: List[str]) -> int:
"""
Runs the given command.
Prints STDOUT in real time, prints STDERR when command is complete,
and logs both STDOUT and STDERR.
    Parameters
    ----------
cmd : list of str
The command to run, to be submitted to `subprocess.Popen()`
Returns
-------
The exit code of the command
"""
if IS_WIN32:
with_shell = False
else:
with_shell = True
cmd = " ".join(cmd) # type: ignore
    proc = Popen(
        cmd,
        stdout=PIPE,
        stderr=PIPE,
        encoding="utf-8",
        errors="replace",  # tolerate undecodable bytes in the output
        shell=with_shell,
    )
# live stream STDOUT
while True:
outs = proc.stdout.readline()
if outs:
print(outs, end="")
logger.info(outs)
if proc.poll() is not None and outs == "":
break
    # STDERR is printed and logged once the command has finished, as one string
    errs = proc.stderr.read()
    if errs:
        print(errs, end="")
        logger.error(errs)
return proc.returncode | 213 | 247 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
sql_item: str,
direction: str,
flat_file: str,
creds,
sql_type: str = "table",
schema: str = "dbo",
format_file_path: str = None,
batch_size: int = None,
col_delimiter: str = None,
row_terminator: str = None,
bcp_path: Union[str, Path] = None,
error_file_path: str = None
):
"""
See https://docs.microsoft.com/en-us/sql/tools/bcp-utility
"""
combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
direc = direction.lower()
# validation
if direc not in DIRECTIONS:
raise BCPandasValueError(
f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
)
if direc not in combos[sql_type]:
raise BCPandasValueError(
f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
)
# auth
if creds.with_krb_auth:
auth = ["-T"]
else:
auth = ["-U", creds.username, "-P", creds.password]
# prepare SQL item string
if sql_type == QUERY:
# remove newlines for queries, otherwise messes up BCP
sql_item_string = quote_this("".join(sql_item.splitlines()))
else:
sql_item_string = f"{schema}.{sql_item}"
# construct BCP command
bcp_command = [
"bcp" if bcp_path is None else quote_this(str(bcp_path)),
sql_item_string,
direc,
flat_file,
"-S",
creds.server,
"-d",
creds.database,
"-q", # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
"-e",
error_file_path
] + auth
if batch_size:
bcp_command += ["-b", str(batch_size)]
# formats
if direc == IN:
bcp_command += ["-f", format_file_path]
elif direc in (OUT, QUERYOUT):
bcp_command += [
"-c", # marking as character data, not Unicode (maybe make as param?)
quote_this(
f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
),
quote_this(
f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
),
]
# execute
bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
ret_code = run_cmd(bcp_command)
if ret_code:
raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
"""
Returns full path to a temporary file without creating it.
"""
tmp_dir = tempfile.gettempdir()
file_path = os.path.join(
tmp_dir, "".join(random.choices(string.ascii_letters + string.digits, k=21))
)
return file_path
def _escape(input_string: str) -> str:
"""
    Adapted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25
"""
return (
input_string.replace('"', '\\"')
.replace("'", "\\'")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
def build_format_file(
df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
"""
    Creates the non-XML SQL format file. Puts 4 spaces between each field.
See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
for the specification of the file.
# TODO add params/options to control:
# - the char type (not just SQLCHAR),
Parameters
----------
df : pandas DataFrame
delimiter : a valid delimiter character
db_cols_order : dict, optional
Dict of {database column name -> ordinal position of the column}.
Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
    1-indexed, so the first column is 1, the second is 2, etc.
Only needed if the order of the columns in the dataframe doesn't match the database.
Returns
-------
A string containing the format file
"""
_space = " " * 4
format_file_str = f"9.0\n{len(df.columns)}\n" # Version and Number of columns
for col_num, col_name in enumerate(df.columns, start=1):
# last col gets a newline sep
_delim = delimiter if col_num != len(df.columns) else NEWLINE
_line = _space.join(
[
str(col_num), # Host file field order
SQLCHAR, # Host file data type
str(0), # Prefix length
str(0), # Host file data length
f'"{_escape(_delim)}"', # Terminator (see note below)
str(
col_num if not db_cols_order else db_cols_order[str(col_name)]
), # Server column order
str(col_name), # Server column name, optional as long as not blank
sql_collation, # Column collation
"\n",
]
)
format_file_str += _line
# FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
# "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
return format_file_str
def quote_this(this: str, skip: bool = False) -> str:
"""
OS-safe way to quote a string.
Returns the string with quotes around it.
    On Windows quoting is skipped entirely,
on Linux it's single quotes.
"""
if isinstance(this, str):
if IS_WIN32:
return this # TODO maybe change?
else:
return shlex.quote(this)
else:
return this
def run_cmd(cmd: List[str]) -> int:
"""
Runs the given command.
Prints STDOUT in real time, prints STDERR when command is complete,
and logs both STDOUT and STDERR.
    Parameters
---------
cmd : list of str
The command to run, to be submitted to `subprocess.Popen()`
Returns
-------
The exit code of the command
"""
if IS_WIN32:
with_shell = False
else:
with_shell = True
cmd = " ".join(cmd) # type: ignore
    proc = Popen(
        cmd,
        stdout=PIPE,
        stderr=PIPE,
        encoding="utf-8",
        errors="replace",  # tolerate undecodable bytes in the output
        shell=with_shell,
    )
# live stream STDOUT
while True:
outs = proc.stdout.readline()
if outs:
print(outs, end="")
logger.info(outs)
if proc.poll() is not None and outs == "":
break
    # STDERR is printed and logged once the command has finished, as one string
    errs = proc.stderr.read()
    if errs:
        print(errs, end="")
        logger.error(errs)
return proc.returncode
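# Usage sketch (illustrative): stream a command's output and check its exit code.
#   rc = run_cmd(["bcp", "-v"])
#   if rc:
#       raise BCPandasException(f"bcp failed with exit code {rc}")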
|
_setupDistribServer | Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site. | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
        Clean up all the event sources left behind, either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
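    # Flow sketch for the test above: the Agent's GET to port2 is served by the
    # ResourceSubscription at /here, which relays the request over PB to the
    # ResourcePublisher behind port1 and returns b'root' from /there.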
# MASKED: _setupDistribServer function (lines 109-135)
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
        self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
        self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
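# Sketch: the entries mimic pwd.struct_passwd tuples, e.g.
#   ('alice', 'x', 123, 456, 'Alice,,,', '/home/alice', '/bin/sh')
# so UserDirectory can consume this stub exactly like the real pwd module.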
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr | 109 | 135 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
        Clean up all the event sources left behind, either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
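    # Topology sketch: an HTTP request to mainPort hits the ResourceSubscription
    # site, which forwards it over a PB (Perspective Broker) connection to the
    # ResourcePublisher site on distribPort, where `child` is actually rendered.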
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
        self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
        self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
|
test_getPublicHTMLChild | L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory. | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
        Clean up all the event sources left behind, either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
        self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
        self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
# MASKED: test_getPublicHTMLChild function (lines 444-456)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path) | 444 | 456 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
        Clean up all the event sources left behind, either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
|
test_getDistribChild | L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket. | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
Clean up all the event sources left behind, either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
# MASKED: test_getDistribChild function (lines 459-472)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path) | 459 | 472 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
Clean up all the event sources left behind, either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
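    # Illustrative sketch only (not distrib's actual implementation): the
    # behaviour exercised here amounts to slicing the payload into PB-safe
    # pieces, e.g.
    #     for i in range(0, len(data), SIZE_LIMIT):
    #         send(data[i:i + SIZE_LIMIT])
    # so that no single Banana string exceeds SIZE_LIMIT bytes.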
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
|
length_normalize | Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
# MASKED: length_normalize function (lines 10-21)
def mean_center(matrix):
"""Performs mean centering across axis 0
Args:
matrix (np.ndarray): Input matrix that needs to be mean centered
"""
avg = np.mean(matrix, axis=0)
matrix -= avg
def reduce_dims(matrix, target_dim):
"""Reduce dimensionality of the data using PCA.
Args:
matrix (np.ndarray): Matrix of the form (n_samples, n_features)
target_dim (uint): Dimension to which n_features should be reduced.
"""
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix) | def length_normalize(matrix):
"""Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix
"""
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis] | 10 | 21 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
def length_normalize(matrix):
"""Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix
"""
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis]
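# Illustrative check (added for clarity, not part of the original module):
# each non-zero row is scaled to unit L2 norm, while all-zero rows pass
# through unchanged because their norm is replaced by 1 before dividing.
def _demo_length_normalize():
    m = np.array([[3.0, 4.0], [0.0, 0.0]])
    return length_normalize(m)  # -> [[0.6, 0.8], [0.0, 0.0]]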
def mean_center(matrix):
"""Performs mean centering across axis 0
Args:
matrix (np.ndarray): Input matrix that needs to be mean centered
"""
avg = np.mean(matrix, axis=0)
matrix -= avg
def reduce_dims(matrix, target_dim):
"""Reduce dimensionality of the data using PCA.
Args:
matrix (np.ndarray): Matrix of the form (n_samples, n_features)
target_dim (uint): Dimension to which n_features should be reduced.
"""
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix)
|
reduce_dims | Reduce dimensionality of the data using PCA.
Args:
matrix (np.ndarray): Matrix of the form (n_samples, n_features)
target_dim (uint): Dimension to which n_features should be reduced. | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
def length_normalize(matrix):
"""Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix
"""
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis]
def mean_center(matrix):
"""Performs mean centering across axis 0
Args:
matrix (np.ndarray): Input matrix that needs to be mean centered
"""
avg = np.mean(matrix, axis=0)
matrix -= avg
# MASKED: reduce_dims function (lines 34-44) | def reduce_dims(matrix, target_dim):
"""Reduce dimensionality of the data using PCA.
Args:
matrix (np.ndarray): Matrix of the form (n_samples, n_features)
target_dim (uint): Dimension to which n_features should be reduced.
"""
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix) | 34 | 44 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
def length_normalize(matrix):
"""Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix
"""
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis]
def mean_center(matrix):
"""Performs mean centering across axis 0
Args:
matrix (np.ndarray): Input matrix that needs to be mean centered
"""
avg = np.mean(matrix, axis=0)
matrix -= avg
def reduce_dims(matrix, target_dim):
"""Reduce dimensionality of the data using PCA.
Args:
matrix (np.ndarray): Matrix of the form (n_samples, n_features)
target_dim (uint): Dimension to which n_features should be reduced.
"""
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix)
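# Illustrative pipeline sketch (an assumed usage, not code from the original
# module): the three helpers compose as mean-center -> PCA-reduce ->
# length-normalize. Note that mean_center mutates its float-array argument
# in place and returns None.
def _demo_preprocess(embeddings, target_dim=64):
    mean_center(embeddings)
    reduced = reduce_dims(embeddings, target_dim)
    return length_normalize(reduced)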
|
__init__ | Initializes the learning rate scheduler.
:param optimizer: A PyTorch optimizer.
:param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
:param total_epochs: The total number of epochs.
:param steps_per_epoch: The number of steps (batches) per epoch.
:param init_lr: The initial learning rate.
:param max_lr: The maximum learning rate (achieved after warmup_epochs).
:param final_lr: The final learning rate (achieved after total_epochs). | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils_node.py
@Time : 2022/03/08 14:35:13
@Author : Jianwen Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2021-2022, SAIL-Lab
'''
######################################## import area ########################################
# common library
import os
import random
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from torch.optim.lr_scheduler import _LRScheduler
######################################## function area ########################################
def seed_everything(seed=2021):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def initialize_weights(model):
"""
Initializes the weights of a model in place.
:param model: An nn.Module.
"""
for param in model.parameters():
if param.dim() > 1:
nn.init.xavier_normal_(param)
def loop(data_loader, model, optimizer, scheduler, device):
batch_size = data_loader.batch_size
data_loader = tqdm(data_loader) if optimizer is not None else data_loader
loss_sum, y_true, y_pred = 0.0, list(), list()
for batch in data_loader:
smiles, mols, batch_node_features, batch_edge_features, batch_distance_matrix, labels = batch
# add mask
batch_masks = torch.sum(torch.abs(batch_node_features), dim=-1) != 0
# (batch, max_length, node_dim)
batch_node_features = batch_node_features.to(device)
# (batch, max_length, max_length, edge_dim)
batch_edge_features = batch_edge_features.to(device)
# (batch, max_length, max_length)
batch_distance_matrix = batch_distance_matrix.to(device)
# (batch, max_length)
batch_masks = batch_masks.to(device)
# (batch, max_length, 1)
labels = labels.to(device)
# (batch, max_length, 1)
outputs = model(batch_node_features, batch_edge_features, batch_distance_matrix, batch_masks, device)
# loss calculation
loss = cal_loss(y_true=labels, y_pred=outputs, device=device)
loss_sum += loss.item()
if optimizer is not None:
# clear gradients for this training step
optimizer.zero_grad()
# back propagation, compute gradients
loss.backward()
# apply gradients
optimizer.step()
# NoamLR needs to step every batch
if scheduler is not None:
scheduler.step()
# collect result
labels = labels.detach().cpu().numpy()
outputs = outputs.detach().cpu().numpy()
y_true.append([])
y_pred.append([])
for label, output in zip(labels, outputs):
label, output = label.flatten(), output.flatten()
for l, o in zip(label, output):
if l != 0.0:
y_true[-1].append(l)
y_pred[-1].append(o)
# clear cuda cache
torch.cuda.empty_cache()
# metric calculation
results = cal_metric(y_true=y_true, y_pred=y_pred)
results['loss'] = loss_sum / (len(data_loader) * batch_size)
return results
def cal_loss(y_true, y_pred, device):
y_true, y_pred = y_true.flatten(), y_pred.flatten()
y_mask = torch.where(y_true != 0.0, torch.full_like(y_true, 1), torch.full_like(y_true, 0))
loss = torch.sum(torch.abs(y_true - y_pred) * y_mask) / torch.sum(y_mask)
return loss
def cal_metric(y_true, y_pred):
concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1)
mae = metrics.mean_absolute_error(concatenate_true, concatenate_pred)
r2 = metrics.r2_score(concatenate_true, concatenate_pred)
return {'mae':mae, 'r2':r2}
class NoamLR(_LRScheduler):
"""
Noam learning rate scheduler with piecewise linear increase and exponential decay.
The learning rate increases linearly from init_lr to max_lr over the course of
the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).
Then the learning rate decreases exponentially from max_lr to final_lr over the
course of the remaining total_steps - warmup_steps (where total_steps =
total_epochs * steps_per_epoch). This is roughly based on the learning rate
schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).
"""
# MASKED: __init__ function (lines 139-171)
def get_lr(self):
"""Gets a list of the current learning rates."""
return list(self.lr)
def step(self, current_step: int = None):
"""
Updates the learning rate by taking a step.
:param current_step: Optionally specify what step to set the learning rate to.
If None, current_step = self.current_step + 1.
"""
if current_step is not None:
self.current_step = current_step
else:
self.current_step += 1
for i in range(self.num_lrs):
if self.current_step <= self.warmup_steps[i]:
self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]
elif self.current_step <= self.total_steps[i]:
self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))
else: # theoretically this case should never be reached since training should stop at total_steps
self.lr[i] = self.final_lr[i]
self.optimizer.param_groups[i]['lr'] = self.lr[i]
| def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):
"""
Initializes the learning rate scheduler.
:param optimizer: A PyTorch optimizer.
:param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
:param total_epochs: The total number of epochs.
:param steps_per_epoch: The number of steps (batches) per epoch.
:param init_lr: The initial learning rate.
:param max_lr: The maximum learning rate (achieved after warmup_epochs).
:param final_lr: The final learning rate (achieved after total_epochs).
"""
assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)
self.num_lrs = len(optimizer.param_groups)
self.optimizer = optimizer
self.warmup_epochs = np.array(warmup_epochs)
self.total_epochs = np.array(total_epochs)
self.steps_per_epoch = steps_per_epoch
self.init_lr = np.array(init_lr)
self.max_lr = np.array(max_lr)
self.final_lr = np.array(final_lr)
self.current_step = 0
self.lr = init_lr
self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)
self.total_steps = self.total_epochs * self.steps_per_epoch
self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps
self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))
super(NoamLR, self).__init__(optimizer) | 139 | 171 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils_node.py
@Time : 2022/03/08 14:35:13
@Author : Jianwen Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2021-2022, SAIL-Lab
'''
######################################## import area ########################################
# common library
import os
import random
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from torch.optim.lr_scheduler import _LRScheduler
######################################## function area ########################################
def seed_everything(seed=2021):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def initialize_weights(model):
"""
Initializes the weights of a model in place.
:param model: An nn.Module.
"""
for param in model.parameters():
if param.dim() > 1:
nn.init.xavier_normal_(param)
def loop(data_loader, model, optimizer, scheduler, device):
batch_size = data_loader.batch_size
data_loader = tqdm(data_loader) if optimizer is not None else data_loader
loss_sum, y_true, y_pred = 0.0, list(), list()
for batch in data_loader:
smiles, mols, batch_node_features, batch_edge_features, batch_distance_matrix, labels = batch
# add mask
batch_masks = torch.sum(torch.abs(batch_node_features), dim=-1) != 0
# (batch, max_length, node_dim)
batch_node_features = batch_node_features.to(device)
# (batch, max_length, max_length, edge_dim)
batch_edge_features = batch_edge_features.to(device)
# (batch, max_length, max_length)
batch_distance_matrix = batch_distance_matrix.to(device)
# (batch, max_length)
batch_masks = batch_masks.to(device)
# (batch, max_length, 1)
labels = labels.to(device)
# (batch, max_length, 1)
outputs = model(batch_node_features, batch_edge_features, batch_distance_matrix, batch_masks, device)
# loss calculation
loss = cal_loss(y_true=labels, y_pred=outputs, device=device)
loss_sum += loss.item()
if optimizer is not None:
# clear gradients for this training step
optimizer.zero_grad()
# back propagation, compute gradients
loss.backward()
# apply gradients
optimizer.step()
# NoamLR needs to step every batch
if scheduler is not None:
scheduler.step()
# collect result
labels = labels.detach().cpu().numpy()
outputs = outputs.detach().cpu().numpy()
y_true.append([])
y_pred.append([])
for label, output in zip(labels, outputs):
label, output = label.flatten(), output.flatten()
for l, o in zip(label, output):
if l != 0.0:
y_true[-1].append(l)
y_pred[-1].append(o)
# clear cuda cache
torch.cuda.empty_cache()
# metric calculation
results = cal_metric(y_true=y_true, y_pred=y_pred)
results['loss'] = loss_sum / (len(data_loader) * batch_size)
return results
def cal_loss(y_true, y_pred, device):
y_true, y_pred = y_true.flatten(), y_pred.flatten()
y_mask = torch.where(y_true != 0.0, torch.full_like(y_true, 1), torch.full_like(y_true, 0))
loss = torch.sum(torch.abs(y_true - y_pred) * y_mask) / torch.sum(y_mask)
return loss
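# Worked example (illustrative): with y_true = [1.0, 0.0, 2.0] and
# y_pred = [1.5, 9.9, 2.0], the zero label is treated as padding and masked
# out, so the loss is (|1.0 - 1.5| + |2.0 - 2.0|) / 2 = 0.25.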
def cal_metric(y_true, y_pred):
concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1)
mae = metrics.mean_absolute_error(concatenate_true, concatenate_pred)
r2 = metrics.r2_score(concatenate_true, concatenate_pred)
return {'mae':mae, 'r2':r2}
class NoamLR(_LRScheduler):
"""
Noam learning rate scheduler with piecewise linear increase and exponential decay.
The learning rate increases linearly from init_lr to max_lr over the course of
the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).
Then the learning rate decreases exponentially from max_lr to final_lr over the
course of the remaining total_steps - warmup_steps (where total_steps =
total_epochs * steps_per_epoch). This is roughly based on the learning rate
schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).
"""
def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):
"""
Initializes the learning rate scheduler.
:param optimizer: A PyTorch optimizer.
:param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
:param total_epochs: The total number of epochs.
:param steps_per_epoch: The number of steps (batches) per epoch.
:param init_lr: The initial learning rate.
:param max_lr: The maximum learning rate (achieved after warmup_epochs).
:param final_lr: The final learning rate (achieved after total_epochs).
"""
assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)
self.num_lrs = len(optimizer.param_groups)
self.optimizer = optimizer
self.warmup_epochs = np.array(warmup_epochs)
self.total_epochs = np.array(total_epochs)
self.steps_per_epoch = steps_per_epoch
self.init_lr = np.array(init_lr)
self.max_lr = np.array(max_lr)
self.final_lr = np.array(final_lr)
self.current_step = 0
self.lr = init_lr
self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)
self.total_steps = self.total_epochs * self.steps_per_epoch
self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps
self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))
super(NoamLR, self).__init__(optimizer)
def get_lr(self):
"""Gets a list of the current learning rates."""
return list(self.lr)
def step(self, current_step: int = None):
"""
Updates the learning rate by taking a step.
:param current_step: Optionally specify what step to set the learning rate to.
If None, current_step = self.current_step + 1.
"""
if current_step is not None:
self.current_step = current_step
else:
self.current_step += 1
for i in range(self.num_lrs):
if self.current_step <= self.warmup_steps[i]:
self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]
elif self.current_step <= self.total_steps[i]:
self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))
else: # theoretically this case should never be reached since training should stop at total_steps
self.lr[i] = self.final_lr[i]
self.optimizer.param_groups[i]['lr'] = self.lr[i]
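# Minimal usage sketch (illustrative; assumes a single parameter group and
# 100 batches per epoch). The scheduler is stepped once per batch, exactly
# as `loop` above does.
def _demo_noam_lr():
    model = nn.Linear(8, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    scheduler = NoamLR(optimizer,
                       warmup_epochs=[2], total_epochs=[10],
                       steps_per_epoch=100,
                       init_lr=[1e-4], max_lr=[1e-3], final_lr=[1e-4])
    for _ in range(10 * 100):  # one step per batch for 10 epochs
        scheduler.step()
    return scheduler.get_lr()  # ends at final_lr after the decay phase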
|
get_indices | This is a tool that will read a CIF file and return the unique T-sites,
their multiplicities, and an example atom index.
It also does the same for the unique O-sites in the framework.
This tool only works on CIFs that are formatted the same way as the IZA
Structure Database CIFs. | __all__ = ['read_cif','cif_site_labels']
from ase.io import read
from ase.spacegroup import spacegroup
import sys
import os
import logging
from math import *
import numpy as np
import pkg_resources
import warnings
warnings.filterwarnings("ignore")
path = '.temp_files/'
filepath = pkg_resources.resource_filename(__name__,path)
'''
NOTE ABOUT CIF FILE FORMATS:
CIFs must include '_symmetry_Int_Tables_number' to be read by ASE.
If this is not included, please edit your CIF file to include this information.
'''
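# For reference, the required tag looks like this inside a CIF header
# (the space-group number 74 is an illustrative value):
# _symmetry_Int_Tables_number 74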
def get_atom_lines(alllines):
order = []
for i,line in enumerate(alllines):
if '_atom' in line:
order.append(line)
start = i+1
end = None
for i,line in enumerate(alllines[start:]):
if len(line.split()) == 0:
end = start+i-1
break
if not end:
end = len(alllines)-1
new_order = []
for i,o in enumerate(order):
if 'site_label' in o:
new_order.append(i)
if 'site_type_symbol' in o:
new_order.append(i)
if 'fract_x' in o:
new_order.append(i)
if 'fract_y' in o:
new_order.append(i)
if 'fract_z' in o:
new_order.append(i)
return start,end,new_order
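# Note on the parsing above (descriptive comment added for clarity):
# new_order records the column positions of the _atom_site_label,
# _atom_site_type_symbol and fractional-coordinate headers in the order they
# appear in the file, so the callers below assume the IZA column order
# label, symbol, x, y, z.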
def fix_cif(cif):
f = open(cif,"r")
alllines = f.readlines()
f.close()
for i, line in enumerate(alllines):
if 'IT_coordinate_system_code' in line:
fields = line.split()
alllines[i] = '_symmetry_space_group_setting {0} \n'.format(fields[-1])
if '_atom_site_type_symbol' in line and '_atom_site_label' in alllines[i+1]:
alllines[i],alllines[i+1] = alllines[i+1],alllines[i]
file_name = cif[:-4] if cif.endswith('.cif') else cif  # rstrip('.cif') strips trailing characters, not the suffix
temp_file = '{0}/{1}_temp.cif'.format(filepath,file_name.split('/')[-1])
f = open(temp_file,"w")
f.writelines(alllines)
f.close()
atoms = read(temp_file)
os.remove(temp_file)
return atoms, alllines
def get_tsites(cif):
from ase.geometry import get_distances
tsites = []
tpos = []
z,alllines = fix_cif(cif)
si = [atom.index for atom in z if atom.symbol!='O']
start,end,order = get_atom_lines(alllines)
for line in alllines[start:end+1]:
if 'Si' in line or 'T' in line:
line = line.split()
temp_label = line[order[0]]
if not any(str.isdigit(c) for c in temp_label):
temp_label = line[order[1]]
if 'Si' in temp_label:
temp_label = temp_label.replace('Si','T')
tsites.append(temp_label)
pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
tpos.append([round(num,2) for num in pos])
tpos = np.array(tpos)
pos = z[si].get_scaled_positions()
tinds = []
tmults = []
t_class = []
for tp in tpos:
for i,p in enumerate(pos):
p = [round(num,2) for num in p]
diff = abs(tp-p)
if sum(diff) <= 0.03:
tinds.append(si[i])
for i in range(1,len(tsites)):
tmults.append(tinds[i]-tinds[i-1])
tmults.append(si[-1]-tinds[-1]+1)
#
# si = [atom.index for atom in z if atom.symbol=='Si']
# o = [atom.index for atom in z if atom.symbol=='O']
# si_pos = z[si].positions
# cell = z.cell
# distances = get_distances(si_pos,si_pos,cell=cell,pbc=[1,1,1])[1]
#
# for i in tinds:
# orig_ind = si.index(i)
# dists = sorted(distances[orig_ind])
# t_class.append([round(num,2) for num in dists])
#
#
# for i,d in enumerate(t_class):
# for j,t in enumerate(distances):
# dist = [round(num,2) for num in sorted(t)]
# if np.array_equal(dist,d):
# dist = [round(num,2) for num in sorted(t)]
# d = np.array(d)
# dist = np.array(dist)
# diff = abs(d - dist)
# if sum(diff) <= 0.1:
# tmults[i]+=1
n = len(si)
sn = sum(tmults)
if n != sn:
print('T-site multiplicities do not sum to the number of T atoms')
return tsites, tmults, tinds
def get_osites(cif):
from ase.geometry import get_distances
osites = []
opos = []
z,alllines = fix_cif(cif)
start,end,order = get_atom_lines(alllines)
for line in alllines[start:end+1]:
if 'O' in line:
line = line.split()
temp_label = line[order[0]]
if not any(str.isdigit(c) for c in temp_label):
temp_label = line[order[1]]
osites.append(temp_label)
pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
opos.append([round(num,2) for num in pos])
opos = np.array(opos)
pos = z.get_scaled_positions()
oinds = []
omults = []
o_class = []
si = [atom.index for atom in z if atom.symbol=='Si']
o = [atom.index for atom in z if atom.symbol=='O']
o_pos = z[o].get_scaled_positions()
for op in opos:
for i,p in enumerate(o_pos):
p = np.array([round(num,2) for num in p])
diff = abs(op-p)
if sum(diff) <= 0.02:
oinds.append(o[i])
for i in range(1,len(osites)):
omults.append(oinds[i]-oinds[i-1])
omults.append(o[-1]-oinds[-1]+1)
# all_pos = z.positions
# o_pos = z[o].positions
# si_pos = z[si].positions
# cell = z.cell
# distances = get_distances(o_pos,all_pos,cell=cell,pbc=[1,1,1])[1]
#
# for i in oinds:
# orig_ind = o.index(i)
# dists = sorted(distances[orig_ind])
# o_class.append([round(num,2) for num in dists])
#
# for i,d in enumerate(o_class):
# for j,t in enumerate(distances):
# dist = [round(num,2) for num in sorted(t)]
# d = np.array(d)
# dist = np.array(dist)
# diff = abs(d - dist)
# if sum(diff) <= 0.05:
# omults[i]+=1
n = len(o)
sn = sum(omults)
if n != sn:
print('O-site multiplicities do not sum to the number of O atoms')
return osites, omults, oinds
def read_cif(cif):
atoms, alllines = fix_cif(cif)
ts,tm,tinds = get_tsites(cif)
os,om,oinds = get_osites(cif)
return atoms,ts,tm,tinds,os,om,oinds
def cif_site_labels(cif):
atoms,ts,tm,tinds,os,om,oinds = read_cif(cif)
labels = {}
for i,t in enumerate(ts):
for j in range(tm[i]):
labels[tinds[i]+j] = t
for i,o in enumerate(os):
for j in range(om[i]):
labels[oinds[i]+j] = o
return labels
''' DEPRECATED FUNCTIONS '''
def float_with_error(x):
"""
Parse a CIF numeric value that may carry an uncertainty, e.g. "1.234(5)".
"""
if "?" in x:
return 0
pos = x.find("(")
if pos >= 0:
x = x[:pos]
return float(x)
def get_mults(cif):
# read the cif file
F = open(cif,"r")
alllines = F.readlines()
F.close()
# Parse out data from the cif file
for i,line in enumerate(alllines):
if '_cell_length_a' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
La = field
if '_cell_length_b' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
Lb = field
if '_cell_length_c' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
Lc = field
if '_cell_angle_alpha' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
alpha = field
if '_cell_angle_beta' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
beta = field
if '_cell_angle_gamma' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
gamma = field
if '_space_group_symop' in line or '_symmetry_equiv_pos' in line or '_space_group' in line:
n = i
lastline = len(alllines)
loops = []
for i,line in enumerate(alllines):
if 'loop' in line:
loops.append(i)
ops = []
for i in range(n+1,loops[1]):
n+=1
line = alllines[i]
if 'x' in line or 'X' in line:
ops.append(line.replace("'",''))
for i in range(len(ops)):
ops[i] = ops[i].replace("0/", "0./") # also for e.g. 10/9
ops[i] = ops[i].replace("1/", "1./")
ops[i] = ops[i].replace("2/", "2./")
ops[i] = ops[i].replace("3/", "3./")
ops[i] = ops[i].replace("4/", "4./")
ops[i] = ops[i].replace("5/", "5./")
ops[i] = ops[i].replace("6/", "6./")
ops[i] = ops[i].replace("7/", "7./")
ops[i] = ops[i].replace("8/", "8./")
ops[i] = ops[i].replace("9/", "9./")
osites = []
tsites = []
atoms = []
for j in range(n,lastline):
line = alllines[j]
if '_' not in line:
fields = line.split()
if len(fields) >3:
tmp = (fields[0],float(fields[2]),float(fields[3]),float(fields[4]))
if 'O' in fields[0]:
osites.append(fields[0])
if 'T' in fields[0]:
tsites.append(fields[0])
atoms.append(tmp)
for i in range(len(atoms)):
(name,xn,yn,zn) = atoms[i]
xn = (xn + 10.0) % 1.0
yn = (yn + 10.0) % 1.0
zn = (zn + 10.0) % 1.0
atoms[i] = (name,xn,yn,zn)
# perform symmetry operations
label_list = []
symbols = []
positions = []
for i in atoms:
label_list.append(i[0])
eps = 0.01
imax = len(atoms)
i=0
while (i<imax):
label,x,y,z=atoms[i]
for op in ops:
op = op.replace("'",'')
op = op.lower()
xn,yn,zn = eval(op)  # op references the x, y, z bound above
xn = (xn + 10.0) % 1.0
yn = (yn + 10.0) % 1.0
zn = (zn + 10.0) % 1.0
new_atom = True
for at in atoms:
if (abs(at[1]-xn) < eps and abs(at[2]-yn) < eps and abs(at[3]-zn) < eps):
new_atom = False
if new_atom:
p1 = np.array([at[1],at[2],at[3]])
p2 = np.array([xn,yn,zn])
diff = abs(p1-p2)
diff = np.round(diff,2)
count = np.count_nonzero(diff)
if count ==1 and 1 in diff:
new_atom = False
if new_atom:
atoms.append( (label,xn,yn,zn) )
label_list.append(label)
i += 1
imax =len(atoms)
#atoms2 = Atoms(symbols,scaled_positions=positions,cell = [La,Lb,Lc,alpha,beta,gamma])
# count up the O-site and T-site multiplicities
label_list = sorted(label_list)
omults = []
for o in osites:
count = label_list.count(o)
omults.append(count)
tmults = []
for t in tsites:
count = label_list.count(t)
tmults.append(count)
return tsites, tmults, osites, omults
# MASKED: get_indices function (lines 378-415) | def get_indices(cif):
'''
This is a tool that will read a CIF file and return the unique T-sites,
their multiplicities, and an example atom index.
It also does the same for the unique O-sites in the framework.
This tool only works on CIFs that are formatted the same way as the IZA
Structure Database CIFs.
'''
tsites, tmults, osites, omults = get_mults(cif)
f = open(cif,"r")
alllines = f.readlines()  # readlines(), not read(): the loop below expects a list of lines
f.close()
for i, line in enumerate(alllines):
if 'IT_coordinate_system_code' in line:
fields = line.split()
alllines[i] = '_symmetry_space_group_setting {0} \n'.format(fields[-1])
# NOTE: the edited lines are never written back to disk; read() below parses the original file.
atoms = read(cif)
oinds = [atom.index for atom in atoms if atom.symbol=='O']
index = 0
first_os = []
for i,m in enumerate(omults):
first_os.append(oinds[index])
index+=m
tinds = [atom.index for atom in atoms if atom.symbol !='O']
index = 0
first_ts = []
for i, m in enumerate(tmults):
first_ts.append(tinds[index])
index+=m
return tsites,tmults,first_ts, osites, omults, first_os | 378 | 415 |
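A minimal usage sketch for the readers above; the CIF path is hypothetical (any IZA Structure Database framework file, e.g. 'MFI.cif', would do):

```python
# Hypothetical file name; CIFs can be downloaded from the IZA Structure Database.
atoms, tsites, tmults, tinds, osites, omults, oinds = read_cif('MFI.cif')
print(tsites, tmults)                # unique T-sites and their multiplicities
labels = cif_site_labels('MFI.cif')  # maps atom index -> site label
```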
|
from_service_account_file | Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TextToSpeechClient: The constructed client. | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.texttospeech.v1beta1 TextToSpeech API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.texttospeech_v1beta1.gapic import enums
from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client_config
from google.cloud.texttospeech_v1beta1.gapic.transports import (
text_to_speech_grpc_transport,
)
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-texttospeech"
).version
class TextToSpeechClient(object):
"""Service that implements Google Cloud Text-to-Speech API."""
SERVICE_ADDRESS = "texttospeech.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.texttospeech.v1beta1.TextToSpeech"
# MASKED: from_service_account_file function (lines 51-67)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.TextToSpeechGrpcTransport,
Callable[[~.Credentials, type], ~.TextToSpeechGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = text_to_speech_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=text_to_speech_grpc_transport.TextToSpeechGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = text_to_speech_grpc_transport.TextToSpeechGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_voices(
self,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of ``Voice`` supported for synthesis.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> response = client.list_voices()
Args:
language_code (str): Optional (but recommended)
`BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag.
If specified, the ListVoices call will only return voices that can be
used to synthesize this language\_code. E.g. when specifying "en-NZ",
you will get supported "en-*" voices; when specifying "no", you will get
supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices;
specifying "zh" will also get supported "cmn-*" voices; specifying
"zh-hk" will also get supported "yue-\*" voices.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_voices" not in self._inner_api_calls:
self._inner_api_calls[
"list_voices"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_voices,
default_retry=self._method_configs["ListVoices"].retry,
default_timeout=self._method_configs["ListVoices"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
return self._inner_api_calls["list_voices"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def synthesize_speech(
self,
input_,
voice,
audio_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Synthesizes speech synchronously: receive results after all text input
has been processed.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> # TODO: Initialize `input_`:
>>> input_ = {}
>>>
>>> # TODO: Initialize `voice`:
>>> voice = {}
>>>
>>> # TODO: Initialize `audio_config`:
>>> audio_config = {}
>>>
>>> response = client.synthesize_speech(input_, voice, audio_config)
Args:
input_ (Union[dict, ~google.cloud.texttospeech_v1beta1.types.SynthesisInput]): Required. The Synthesizer requires either plain text or SSML as input.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.SynthesisInput`
voice (Union[dict, ~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams]): Required. The desired voice of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams`
audio_config (Union[dict, ~google.cloud.texttospeech_v1beta1.types.AudioConfig]): Required. The configuration of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.AudioConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.SynthesizeSpeechResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "synthesize_speech" not in self._inner_api_calls:
self._inner_api_calls[
"synthesize_speech"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.synthesize_speech,
default_retry=self._method_configs["SynthesizeSpeech"].retry,
default_timeout=self._method_configs["SynthesizeSpeech"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.SynthesizeSpeechRequest(
input=input_, voice=voice, audio_config=audio_config
)
return self._inner_api_calls["synthesize_speech"](
request, retry=retry, timeout=timeout, metadata=metadata
) | @classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TextToSpeechClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs) | 51 | 67 |
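A brief usage sketch (the key-file path is hypothetical; a real key and network access are required for the call to succeed):

```python
from google.cloud import texttospeech_v1beta1

# Hypothetical path to a service account key file.
client = texttospeech_v1beta1.TextToSpeechClient.from_service_account_file(
    "service-account.json")
voices = client.list_voices(language_code="en-US")
```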
|
keras_convert_hdf5_model_to_tf_saved_model | Converts Keras HDF5 model to Tensorflow SavedModel format.
Args:
model_path: Keras model in HDF5 format.
converted_model_path: Keras model in Tensorflow SavedModel format.
Annotations:
author: Alexey Volkov <[email protected]> | from kfp.components import create_component_from_func, InputPath, OutputPath
# MASKED: keras_convert_hdf5_model_to_tf_saved_model function (lines 3-20)
if __name__ == '__main__':
keras_convert_hdf5_model_to_tf_saved_model_op = create_component_from_func(
keras_convert_hdf5_model_to_tf_saved_model,
base_image='tensorflow/tensorflow:2.3.0',
packages_to_install=['h5py==2.10.0'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml",
},
) | def keras_convert_hdf5_model_to_tf_saved_model(
model_path: InputPath('KerasModelHdf5'),
converted_model_path: OutputPath('TensorflowSavedModel'),
):
'''Converts Keras HDF5 model to Tensorflow SavedModel format.
Args:
model_path: Keras model in HDF5 format.
converted_model_path: Keras model in Tensorflow SavedModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
from tensorflow import keras
model = keras.models.load_model(filepath=model_path)
keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf') | 3 | 20 |
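A quick local sanity check of the conversion function itself, bypassing KFP (paths are hypothetical; normally KFP supplies them via InputPath/OutputPath, and TensorFlow must be installed):

```python
# Hypothetical paths -- at runtime the path annotations behave as plain strings.
keras_convert_hdf5_model_to_tf_saved_model(
    model_path="model.h5",
    converted_model_path="saved_model_dir",
)
```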
|
filter_with_CNNPatientUI | Check the data, retaining only the records whose CNNPatientUI matches.
:param CNNPatientUI:
:return: | import sys
from query_common import filter_records, ProjectMixins
from redcap import Project # note this is from PyCap.redcap
from typing import List, Union
"""
This class of functions are responsible of retrieving relevant data structures from the CNFUN tables
"""
class CNFUN_project(ProjectMixins):
"""
One baby can have many admissions CaseIDs.
One hospital record can have many CaseIDs.
One baby has only one hospital record number.
"""
def __init__(
self, Token, URL, get_all_field=False,
):
"""
Create a project using PyCap
:param Token:
:param URL:
:return:
"""
# Several key properties we'll use throughout
self.project = Project(URL, Token)
# These are very important ID fields from the
fields_keyid = ["patientID", "cf_p_cnnpatientui"]
# For now, only fetch the data related to these key IDs to reduce load time
self.data = self.get_fields(fields_keyid)
# if specified, get all the records.
if get_all_field:
self.data = self.project.export_records()
# MASKED: filter_with_CNNPatientUI function (lines 39-56)
def get_PatientID_with_CNNPatientUI(self, CNNPatientUI: Union[str, List[str]]):
"""
PatientID has 1:1 correspondence with CNNPatientUI which is the same as PatientUI from CNN Baby table.
:return:
"""
# Listify the CNNPatientUI
if type(CNNPatientUI) is str:
CNNPatientUI = [CNNPatientUI]
# Filter with the information
list_filtered_dict = self.filter_with_CNNPatientUI(CNNPatientUI)
# Aggregate the list_PatientID
list_PatientID = []
for case in list_filtered_dict:
list_PatientID.append(case["patientid"])
return list_PatientID
def get_records_CNFUN(self, PatientID: Union[str, List[str]]):
"""
Retrieve the CNFUN records for the given PatientID(s).
:param PatientID:
:return:
"""
if type(PatientID) is str:
PatientID = [PatientID]
cases_data = self.project.export_records(records=PatientID)
return cases_data | def filter_with_CNNPatientUI(self, CNNPatientUI: Union[str, List[str]]):
"""
Check the data, retaining only the records whose CNNPatientUI matches.
:param CNNPatientUI:
:return:
"""
list_filtered = None
filtered_field = "cf_p_cnnpatientui"
# Handle CNNPatientUI given as a single string instead of a list (allows batch use).
if type(CNNPatientUI) is str:
CNNPatientUI = [CNNPatientUI]
list_filtered = filter_records(self.data, filtered_field, CNNPatientUI)
return list_filtered | 39 | 56 |
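A minimal usage sketch (the token and URL are placeholders; a reachable REDCap instance is required):

```python
# Placeholders -- supply a real API token and REDCap API URL.
cnfun = CNFUN_project(Token="XXXXXXXX", URL="https://redcap.example.org/api/")
patient_ids = cnfun.get_PatientID_with_CNNPatientUI("some_cnn_patientui")
records = cnfun.get_records_CNFUN(patient_ids)
```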
|
_calculate_reciprocal_rank | Calculate the reciprocal rank for a given hypothesis and reference
Params:
hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance
reference_id: Reference id (as a integer) of the correct id of response
Returns:
reciprocal rank | # Import dependencies
# Math/Torch
import numpy as np
import torch.nn as nn
# Typing
from typing import List
# Instantiate class
class MRR(nn.Module):
"""Compute MRR metric (Mean reciprocal rank)"""
def __init__(self, max_rank = 10):
super(MRR, self).__init__()
# Set max mrr rank
self.max_rank = max_rank
# MASKED: _calculate_reciprocal_rank function (lines 20-47)
def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float:
"""Score the mean reciprocal rank for the batch
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]]
>>> batch_reference_id = [2, 2, 1]
>>> mrr = MRR()
>>> mrr(batch_hypothesis_ids, batch_reference_id)
0.61111111111111105
Args:
batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance
reference_id: Batch of reference id (as a integer) of the correct id of response
Returns:
Mean reciprocal rank (MRR)
"""
# Assure batches have same length
assert len(batch_hypothesis_ids) == len(batch_reference_id), "Hypothesis batch and reference batch must have same length."
# Size of batch
batch_size = len(batch_hypothesis_ids)
# MRR to be calculated
mrr = 0
for hypothesis_ids, reference_id in zip(batch_hypothesis_ids, batch_reference_id):
# Calculate reciprocal rank
reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id)
# Add to MRR
mrr += reciprocal_rank/batch_size
return mrr | def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float:
"""Calculate the reciprocal rank for a given hypothesis and reference
Params:
hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance
reference_id: Reference id (as a integer) of the correct id of response
Returns:
reciprocal rank
"""
# Assure hypothesis_ids is a numpy array
hypothesis_ids = np.asarray(hypothesis_ids)
# Calculate rank
try:
rank = np.where(hypothesis_ids == reference_id)[0][0] + 1
except IndexError:
rank = self.max_rank + 1
# A rank greater than max_rank is scored as zero
if rank > self.max_rank:
reciprocal_rank = 0.0
else:
# Calculate reciprocal rank
reciprocal_rank = 1. / rank
return reciprocal_rank | 20 | 47 |
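A quick sanity check reproducing the docstring example, assuming the MRR class as defined above:

```python
mrr = MRR()
score = mrr([[1, 0, 2], [0, 2, 1], [1, 0, 2]], [2, 2, 1])
assert abs(score - 11 / 18) < 1e-9  # (1/3 + 1/2 + 1) / 3
```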
|
prepare_for_launch | Load config, figure out working directory, create runner.
- when args.config_file is empty, returned cfg will be the default one
- returned output_dir will always be non-empty; args.output_dir has higher
priority than cfg.OUTPUT_DIR. | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import time
import detectron2.utils.comm as comm
import torch
from d2go.config import (
CfgNode as CN,
auto_scale_world_size,
reroute_config_path,
temp_defrost,
)
from d2go.distributed import get_local_rank, get_num_processes_per_machine
from d2go.runner import GeneralizedRCNNRunner, create_runner
from d2go.utils.launch_environment import get_launch_environment
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from detectron2.utils.serialize import PicklableWrapper
from d2go.utils.helper import run_once
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
def basic_argument_parser(
distributed=True,
requires_config_file=True,
requires_output_dir=True,
):
""" Basic cli tool parser for Detectron2Go binaries """
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--runner",
type=str,
default="d2go.runner.GeneralizedRCNNRunner",
help="Full class name, i.e. (package.)module.class",
)
parser.add_argument(
"--config-file",
help="path to config file",
default="",
required=requires_config_file,
metavar="FILE",
)
parser.add_argument(
"--output-dir",
help="When given, this will override the OUTPUT_DIR in the config-file",
required=requires_output_dir,
default=None,
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
if distributed:
parser.add_argument(
"--num-processes", type=int, default=1, help="number of gpus per machine"
)
parser.add_argument("--num-machines", type=int, default=1)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
)
parser.add_argument("--dist-backend", type=str, default="NCCL")
if not requires_config_file:
# NOTE: if no yaml file is passed, the user should explicitly set the
# following args, and use `opts` for non-common use cases.
parser.add_argument(
"--datasets",
type=str,
nargs="+",
required=True,
help="cfg.DATASETS.TEST",
)
parser.add_argument(
"--min_size",
type=int,
required=True,
help="cfg.INPUT.MIN_SIZE_TEST",
)
parser.add_argument(
"--max_size",
type=int,
required=True,
help="cfg.INPUT.MAX_SIZE_TEST",
)
return parser
return parser
def create_cfg_from_cli_args(args, default_cfg):
"""
Instead of loading from defaults.py, this binary builds only the necessary
configs from scratch and overrides them from args. There are two
levels of config:
_C: the config system used by this binary, which is a sub-set of the training
config, overridden by configurable_cfg. It can also be overridden by
args.opts for convenience.
configurable_cfg: common configs that the user should explicitly specify
in the args.
"""
_C = CN()
_C.INPUT = default_cfg.INPUT
_C.DATASETS = default_cfg.DATASETS
_C.DATALOADER = default_cfg.DATALOADER
_C.TEST = default_cfg.TEST
if hasattr(default_cfg, "D2GO_DATA"):
_C.D2GO_DATA = default_cfg.D2GO_DATA
if hasattr(default_cfg, "TENSORBOARD"):
_C.TENSORBOARD = default_cfg.TENSORBOARD
# NOTE configs below might not be necessary, but must add to make code work
_C.MODEL = CN()
_C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE
_C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON
_C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON
_C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS
assert _C.MODEL.LOAD_PROPOSALS is False, "caffe2 model doesn't support loading proposals"
_C.OUTPUT_DIR = args.output_dir
configurable_cfg = [
"DATASETS.TEST",
args.datasets,
"INPUT.MIN_SIZE_TEST",
args.min_size,
"INPUT.MAX_SIZE_TEST",
args.max_size,
]
cfg = _C.clone()
cfg.merge_from_list(configurable_cfg)
cfg.merge_from_list(args.opts)
return cfg
# MASKED: prepare_for_launch function (lines 157-179)
def setup_after_launch(cfg, output_dir, runner):
"""
Set things up after entering DDP, including
- creating working directory
- setting up logger
- logging environment
- initializing runner
"""
create_dir_on_global_main_process(output_dir)
comm.synchronize()
setup_loggers(output_dir)
cfg.freeze()
if cfg.OUTPUT_DIR != output_dir:
with temp_defrost(cfg):
logger.warning(
"Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
cfg.OUTPUT_DIR, output_dir
)
)
cfg.OUTPUT_DIR = output_dir
logger.info("Initializing runner ...")
runner = initialize_runner(runner, cfg)
log_info(cfg, runner)
dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
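# Typical call order (illustrative sketch based on the two functions above):
#   cfg, output_dir, runner = prepare_for_launch(args)
#   # ... enter the DDP worker process, then:
#   setup_after_launch(cfg, output_dir, runner)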
@run_once()
def setup_loggers(output_dir, color=None):
if color is None:  # don't silently override an explicit color=False
color = get_launch_environment() == "local"
d2_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="detectron2",
abbrev_name="d2",
)
fvcore_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="fvcore",
)
d2go_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="d2go",
abbrev_name="d2go",
)
mobile_cv_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="mobile_cv",
abbrev_name="mobile_cv",
)
# NOTE: all of the above loggers have a FileHandler pointing to the same file as
# d2_logger. The files are opened upon creation, which is fine in 'a' (append) mode.
# NOTE: the root logger might have been configured by other applications; since
# these loggers are already sub-top level, just don't propagate to root.
d2_logger.propagate = False
fvcore_logger.propagate = False
d2go_logger.propagate = False
mobile_cv_logger.propagate = False
def log_info(cfg, runner):
num_processes = get_num_processes_per_machine()
logger.info(
"Using {} processes per machine. Rank of current process: {}".format(
num_processes, comm.get_rank()
)
)
logger.info("Environment info:\n" + collect_env_info())
logger.info("Running with full config:\n{}".format(cfg))
logger.info("Running with runner: {}".format(runner))
def dump_cfg(cfg, path):
if comm.is_main_process():
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(path))
def create_dir_on_local_main_process(dir):
if get_local_rank() == 0 and dir:
PathManager.mkdirs(dir)
def create_dir_on_global_main_process(dir):
if comm.get_rank() == 0 and dir:
PathManager.mkdirs(dir)
def initialize_runner(runner, cfg):
runner = runner or GeneralizedRCNNRunner()
runner._initialize(cfg)
return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
if num_threads is None:
if get_num_processes_per_machine() > 1:
# by default use single thread when DDP with multiple processes
num_threads = 1
else:
# GlobalInit resets PyTorch's num_threads to 1, so capture PyTorch's
# current default here in order to preserve it.
num_threads = torch.get_num_threads()
if get_local_rank() != 0:
logging_print_net_summary = 0 # only enable for local main process
from caffe2.python import workspace
workspace.GlobalInit(
[
"caffe2",
"--caffe2_log_level=2",
"--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
"--caffe2_omp_num_threads={}".format(num_threads),
"--caffe2_mkl_num_threads={}".format(num_threads),
]
)
logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func):
def new_main_func(cfg, output_dir, *args, **kwargs):
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None # fallback to use normal pdb for single process
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func) | def prepare_for_launch(args):
"""
Load config, figure out working directory, create runner.
- when args.config_file is empty, the returned cfg will be the default one
- the returned output_dir will always be non-empty; args.output_dir takes
priority over cfg.OUTPUT_DIR.
"""
print(args)
runner = create_runner(args.runner)
cfg = runner.get_default_cfg()
if args.config_file:
with PathManager.open(reroute_config_path(args.config_file), "r") as f:
print("Loaded config file {}:\n{}".format(args.config_file, f.read()))
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
else:
cfg = create_cfg_from_cli_args(args, default_cfg=cfg)
cfg.freeze()
assert args.output_dir or args.config_file
output_dir = args.output_dir or cfg.OUTPUT_DIR
return cfg, output_dir, runner | 157 | 179 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import time
import detectron2.utils.comm as comm
import torch
from d2go.config import (
CfgNode as CN,
auto_scale_world_size,
reroute_config_path,
temp_defrost,
)
from d2go.distributed import get_local_rank, get_num_processes_per_machine
from d2go.runner import GeneralizedRCNNRunner, create_runner
from d2go.utils.launch_environment import get_launch_environment
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from detectron2.utils.serialize import PicklableWrapper
from d2go.utils.helper import run_once
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
def basic_argument_parser(
distributed=True,
requires_config_file=True,
requires_output_dir=True,
):
""" Basic cli tool parser for Detectron2Go binaries """
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--runner",
type=str,
default="d2go.runner.GeneralizedRCNNRunner",
help="Full class name, i.e. (package.)module.class",
)
parser.add_argument(
"--config-file",
help="path to config file",
default="",
required=requires_config_file,
metavar="FILE",
)
parser.add_argument(
"--output-dir",
help="When given, this will override the OUTPUT_DIR in the config-file",
required=requires_output_dir,
default=None,
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
if distributed:
parser.add_argument(
"--num-processes", type=int, default=1, help="number of gpus per machine"
)
parser.add_argument("--num-machines", type=int, default=1)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
)
parser.add_argument("--dist-backend", type=str, default="NCCL")
if not requires_config_file:
# NOTE: if no yaml file is passed, the user should explicitly set the
# following args, and use `opts` for uncommon use cases.
parser.add_argument(
"--datasets",
type=str,
nargs="+",
required=True,
help="cfg.DATASETS.TEST",
)
parser.add_argument(
"--min_size",
type=int,
required=True,
help="cfg.INPUT.MIN_SIZE_TEST",
)
parser.add_argument(
"--max_size",
type=int,
required=True,
help="cfg.INPUT.MAX_SIZE_TEST",
)
return parser
def create_cfg_from_cli_args(args, default_cfg):
"""
Instead of loading from defaults.py, this binary only includes the necessary
configs, building them from scratch and overriding them from args. There are
two levels of config:
_C: the config system used by this binary, which is a subset of the training
config, overridden by configurable_cfg. It can also be overridden by
args.opts for convenience.
configurable_cfg: common configs that the user should explicitly specify
in the args.
"""
_C = CN()
_C.INPUT = default_cfg.INPUT
_C.DATASETS = default_cfg.DATASETS
_C.DATALOADER = default_cfg.DATALOADER
_C.TEST = default_cfg.TEST
if hasattr(default_cfg, "D2GO_DATA"):
_C.D2GO_DATA = default_cfg.D2GO_DATA
if hasattr(default_cfg, "TENSORBOARD"):
_C.TENSORBOARD = default_cfg.TENSORBOARD
# NOTE: the configs below might not be necessary, but they must be added to make the code work
_C.MODEL = CN()
_C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE
_C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON
_C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON
_C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS
assert _C.MODEL.LOAD_PROPOSALS is False, "caffe2 model doesn't support loading proposals"
_C.OUTPUT_DIR = args.output_dir
configurable_cfg = [
"DATASETS.TEST",
args.datasets,
"INPUT.MIN_SIZE_TEST",
args.min_size,
"INPUT.MAX_SIZE_TEST",
args.max_size,
]
cfg = _C.clone()
cfg.merge_from_list(configurable_cfg)
cfg.merge_from_list(args.opts)
return cfg
def prepare_for_launch(args):
"""
Load config, figure out working directory, create runner.
- when args.config_file is empty, the returned cfg will be the default one
- the returned output_dir will always be non-empty; args.output_dir takes
priority over cfg.OUTPUT_DIR.
"""
print(args)
runner = create_runner(args.runner)
cfg = runner.get_default_cfg()
if args.config_file:
with PathManager.open(reroute_config_path(args.config_file), "r") as f:
print("Loaded config file {}:\n{}".format(args.config_file, f.read()))
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
else:
cfg = create_cfg_from_cli_args(args, default_cfg=cfg)
cfg.freeze()
assert args.output_dir or args.config_file
output_dir = args.output_dir or cfg.OUTPUT_DIR
return cfg, output_dir, runner
def setup_after_launch(cfg, output_dir, runner):
"""
Set things up after entering DDP, including
- creating working directory
- setting up logger
- logging environment
- initializing runner
"""
create_dir_on_global_main_process(output_dir)
comm.synchronize()
setup_loggers(output_dir)
cfg.freeze()
if cfg.OUTPUT_DIR != output_dir:
with temp_defrost(cfg):
logger.warning(
"Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
cfg.OUTPUT_DIR, output_dir
)
)
cfg.OUTPUT_DIR = output_dir
logger.info("Initializing runner ...")
runner = initialize_runner(runner, cfg)
log_info(cfg, runner)
dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
@run_once()
def setup_loggers(output_dir, color=None):
if color is None:  # don't silently override an explicit color=False
color = get_launch_environment() == "local"
d2_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="detectron2",
abbrev_name="d2",
)
fvcore_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="fvcore",
)
d2go_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="d2go",
abbrev_name="d2go",
)
mobile_cv_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="mobile_cv",
abbrev_name="mobile_cv",
)
# NOTE: all of the above loggers have a FileHandler pointing to the same file as
# d2_logger. The files are opened upon creation, which is fine in 'a' (append) mode.
# NOTE: the root logger might have been configured by other applications; since
# these loggers are already sub-top level, just don't propagate to root.
d2_logger.propagate = False
fvcore_logger.propagate = False
d2go_logger.propagate = False
mobile_cv_logger.propagate = False
def log_info(cfg, runner):
num_processes = get_num_processes_per_machine()
logger.info(
"Using {} processes per machine. Rank of current process: {}".format(
num_processes, comm.get_rank()
)
)
logger.info("Environment info:\n" + collect_env_info())
logger.info("Running with full config:\n{}".format(cfg))
logger.info("Running with runner: {}".format(runner))
def dump_cfg(cfg, path):
if comm.is_main_process():
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(path))
def create_dir_on_local_main_process(dir):
if get_local_rank() == 0 and dir:
PathManager.mkdirs(dir)
def create_dir_on_global_main_process(dir):
if comm.get_rank() == 0 and dir:
PathManager.mkdirs(dir)
def initialize_runner(runner, cfg):
runner = runner or GeneralizedRCNNRunner()
runner._initialize(cfg)
return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
if num_threads is None:
if get_num_processes_per_machine() > 1:
# by default use single thread when DDP with multiple processes
num_threads = 1
else:
# GlobalInit resets PyTorch's num_threads to 1, so capture PyTorch's
# current default here in order to preserve it.
num_threads = torch.get_num_threads()
if get_local_rank() != 0:
logging_print_net_summary = 0 # only enable for local main process
from caffe2.python import workspace
workspace.GlobalInit(
[
"caffe2",
"--caffe2_log_level=2",
"--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
"--caffe2_omp_num_threads={}".format(num_threads),
"--caffe2_mkl_num_threads={}".format(num_threads),
]
)
logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func):
def new_main_func(cfg, output_dir, *args, **kwargs):
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None # fallback to use normal pdb for single process
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func)
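# Usage sketch (illustrative): wrap an entry point so unhandled exceptions drop
# into a (multiprocessing-aware) pdb post-mortem session:
#   main = post_mortem_if_fail_for_main(main)
#   main(cfg, output_dir)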
|
__init__ | Creates a new window for the user to input
which regions to add to the scene.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets | from qtpy.QtWidgets import QDialog, QLineEdit, QPushButton, QLabel, QVBoxLayout
from brainrender_gui.style import style, update_css
class AddRegionsWindow(QDialog):
left = 250
top = 250
width = 400
height = 300
label_msg = (
"Write the acronyms of brainregions "
+ "you wish to add.\n[as 'space' separated strings (e.g.: STN TH)]"
)
# MASKED: __init__ function (lines 16-31)
def ui(self):
"""
Define UI's elements
"""
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
# Regions
label = QLabel(self)
label.setObjectName("PopupLabel")
label.setText(self.label_msg)
self.textbox = QLineEdit(self)
# Alpha
alpha_label = QLabel(self)
alpha_label.setObjectName("PopupLabel")
alpha_label.setText("Alpha")
self.alpha_textbox = QLineEdit(self)
self.alpha_textbox.setText(str(1.0))
# Color
color_label = QLabel(self)
color_label.setObjectName("PopupLabel")
color_label.setText("Color")
self.color_textbox = QLineEdit(self)
self.color_textbox.setText("atlas")
# Create a button in the window
self.button = QPushButton("Add regions", self)
self.button.clicked.connect(self.on_click)
self.button.setObjectName("RegionsButton")
layout.addWidget(label)
layout.addWidget(self.textbox)
layout.addWidget(alpha_label)
layout.addWidget(self.alpha_textbox)
layout.addWidget(color_label)
layout.addWidget(self.color_textbox)
layout.addWidget(self.button)
self.setLayout(layout)
self.show()
def on_click(self):
"""
On click or 'Enter' get the regions
from the input and call the add_regions
method of the main window
"""
regions = self.textbox.text().split(" ")
self.main_window.add_regions(
regions, self.alpha_textbox.text(), self.color_textbox.text()
)
self.close() | def __init__(self, main_window, palette):
"""
Creates a new window for the user to input
which regions to add to the scene.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets
"""
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette)) | 16 | 31 | from qtpy.QtWidgets import QDialog, QLineEdit, QPushButton, QLabel, QVBoxLayout
from brainrender_gui.style import style, update_css
class AddRegionsWindow(QDialog):
left = 250
top = 250
width = 400
height = 300
label_msg = (
"Write the acronyms of brainregions "
+ "you wish to add.\n[as 'space' separated strings (e.g.: STN TH)]"
)
def __init__(self, main_window, palette):
"""
Creates a new window for the user to input
which regions to add to the scene.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets
"""
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette))
def ui(self):
"""
Define UI's elements
"""
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
# Regions
label = QLabel(self)
label.setObjectName("PopupLabel")
label.setText(self.label_msg)
self.textbox = QLineEdit(self)
# Alpha
alpha_label = QLabel(self)
alpha_label.setObjectName("PopupLabel")
alpha_label.setText("Alpha")
self.alpha_textbox = QLineEdit(self)
self.alpha_textbox.setText(str(1.0))
# Color
color_label = QLabel(self)
color_label.setObjectName("PopupLabel")
color_label.setText("Color")
self.color_textbox = QLineEdit(self)
self.color_textbox.setText("atlas")
# Create a button in the window
self.button = QPushButton("Add regions", self)
self.button.clicked.connect(self.on_click)
self.button.setObjectName("RegionsButton")
layout.addWidget(label)
layout.addWidget(self.textbox)
layout.addWidget(alpha_label)
layout.addWidget(self.alpha_textbox)
layout.addWidget(color_label)
layout.addWidget(self.color_textbox)
layout.addWidget(self.button)
self.setLayout(layout)
self.show()
def on_click(self):
"""
On click or 'Enter' get the regions
from the input and call the add_regions
method of the main window
"""
regions = self.textbox.text().split(" ")
self.main_window.add_regions(
regions, self.alpha_textbox.text(), self.color_textbox.text()
)
self.close()
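# Usage sketch (illustrative; assumes a main window exposing an `add_regions`
# method and a Qt palette):
#   popup = AddRegionsWindow(main_window, main_window.palette())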
|
binary_image_to_lut_indices | Convert a binary image to an index image that can be used with a lookup table
to perform morphological operations. Non-zero elements in the image are interpreted
as 1, zero elements as 0
:param x: a 2D NumPy array.
:return: a 2D NumPy array, same shape as x | import numpy as np
# Thinning morphological operation applied using lookup tables.
# We convert the 3x3 neighbourhood surrounding a pixel to an index
# used to look up the output in a lookup table.
# Bit masks for each neighbour
# 1 2 4
# 8 16 32
# 64 128 256
NEIGH_MASK_EAST = 32
NEIGH_MASK_NORTH_EAST = 4
NEIGH_MASK_NORTH = 2
NEIGH_MASK_NORTH_WEST = 1
NEIGH_MASK_WEST = 8
NEIGH_MASK_SOUTH_WEST = 64
NEIGH_MASK_SOUTH = 128
NEIGH_MASK_SOUTH_EAST = 256
NEIGH_MASK_CENTRE = 16
# Masks in a list
# MASKS[0] = centre
# MASKS[1..8] = start from east, counter-clockwise
MASKS = [NEIGH_MASK_CENTRE,
NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH, NEIGH_MASK_NORTH_WEST,
NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST, NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST,
]
# Constant listing all indices
_LUT_INDS = np.arange(512)
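# Worked example (illustrative): a neighbourhood whose centre, east and north
# pixels are set maps to index 16 + 32 + 2 = 50, so lut[50] selects its output.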
# MASKED: binary_image_to_lut_indices function (lines 33-63)
def apply_lut(x, lut):
"""
Perform a morphological operation on the binary image x using the supplied lookup table
:param x: a binary image as a 2D array
:param lut: a lookup table with 512 entries
:return: the image produced by applying the lookup table
"""
if lut.ndim != 1:
raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim))
if lut.shape[0] != 512:
raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0]))
lut_indices = binary_image_to_lut_indices(x)
return lut[lut_indices]
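# Illustrative property: applying the identity LUT (defined below) leaves the
# binarised image unchanged, i.e. apply_lut(img, identity_lut()) equals
# (img != 0) element-wise.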
def identity_lut():
"""
Create an identity lookup table
:return:
"""
lut = np.zeros((512,), dtype=bool)
inds = np.arange(512)
lut[(inds & NEIGH_MASK_CENTRE) != 0] = True
return lut
def _lut_mutate_mask(lut):
"""
Get a mask that shows which neighbourhood shapes result in changes to the image
:param lut: lookup table
:return: mask indicating which lookup indices result in changes
"""
return lut != identity_lut()
def lut_masks_zero(neigh):
"""
Create a LUT index mask for which the specified neighbour is 0
:param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
:return: a LUT index mask
"""
if neigh > 8:
neigh -= 8
return (_LUT_INDS & MASKS[neigh]) == 0
def lut_masks_one(neigh):
"""
Create a LUT index mask for which the specified neighbour is 1
:param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
:return: a LUT index mask
"""
if neigh > 8:
neigh -= 8
return (_LUT_INDS & MASKS[neigh]) != 0
def _thin_cond_g1():
"""
Thinning morphological operation; condition G1
:return: a LUT index mask
"""
b = np.zeros(512, dtype=int)
for i in range(1, 5):
b += lut_masks_zero(2 * i - 1) & (lut_masks_one(2 * i) | lut_masks_one(2 * i + 1))
return b == 1
def _thin_cond_g2():
"""
Thinning morphological operation; condition G2
:return: a LUT index mask
"""
n1 = np.zeros(512, dtype=int)
n2 = np.zeros(512, dtype=int)
for k in range(1, 5):
n1 += (lut_masks_one(2 * k - 1) | lut_masks_one(2 * k))
n2 += (lut_masks_one(2 * k) | lut_masks_one(2 * k + 1))
m = np.minimum(n1, n2)
return (m >= 2) & (m <= 3)
def _thin_cond_g3():
"""
Thinning morphological operation; condition G3
:return: a LUT index mask
"""
return ((lut_masks_one(2) | lut_masks_one(3) | lut_masks_zero(8)) & lut_masks_one(1)) == 0
def _thin_cond_g3_prime():
"""
Thinning morphological operation; condition G3'
:return: a LUT index mask
"""
return ((lut_masks_one(6) | lut_masks_one(7) | lut_masks_zero(4)) & lut_masks_one(5)) == 0
def _thin_iter_1_lut():
"""
Thinning morphological operation; lookup table for iteration 1
:return: lookup table
"""
lut = identity_lut()
cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3()
lut[cond] = False
return lut
def _thin_iter_2_lut():
"""
Thinning morphological operation; lookup table for iteration 2
:return: lookup table
"""
lut = identity_lut()
cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3_prime()
lut[cond] = False
return lut
def binary_thin(x, max_iter=None):
"""
Binary thinning morphological operation
:param x: a binary image, or an image that is to be converted to a binary image
:param max_iter: maximum number of iterations; the default `None` places no limit on the
number of iterations (`binary_thin` terminates automatically once no more changes occur)
:return:
"""
thin1 = _thin_iter_1_lut()
thin2 = _thin_iter_2_lut()
thin1_mut = _lut_mutate_mask(thin1)
thin2_mut = _lut_mutate_mask(thin2)
iter_count = 0
while max_iter is None or iter_count < max_iter:
# Iter 1
lut_indices = binary_image_to_lut_indices(x)
x_mut = thin1_mut[lut_indices]
if x_mut.sum() == 0:
break
x = thin1[lut_indices]
# Iter 2
lut_indices = binary_image_to_lut_indices(x)
x_mut = thin2_mut[lut_indices]
if x_mut.sum() == 0:
break
x = thin2[lut_indices]
iter_count += 1
return x | def binary_image_to_lut_indices(x):
"""
Convert a binary image to an index image that can be used with a lookup table
to perform morphological operations. Non-zero elements in the image are interpreted
as 1, zero elements as 0
:param x: a 2D NumPy array.
:return: a 2D NumPy array, same shape as x
"""
if x.ndim != 2:
raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim))
# If the dtype of x is not bool, convert
if x.dtype != bool:  # np.bool was removed from modern NumPy; use the builtin
x = x != 0
# Pad with a one-pixel zero border so every pixel has a full 3x3 neighbourhood
x = np.pad(x, [(1, 1), (1, 1)], mode='constant')
# Convert to LUT indices
lut_indices = x[:-2, :-2] * NEIGH_MASK_NORTH_WEST + \
x[:-2, 1:-1] * NEIGH_MASK_NORTH + \
x[:-2, 2:] * NEIGH_MASK_NORTH_EAST + \
x[1:-1, :-2] * NEIGH_MASK_WEST + \
x[1:-1, 1:-1] * NEIGH_MASK_CENTRE + \
x[1:-1, 2:] * NEIGH_MASK_EAST + \
x[2:, :-2] * NEIGH_MASK_SOUTH_WEST + \
x[2:, 1:-1] * NEIGH_MASK_SOUTH + \
x[2:, 2:] * NEIGH_MASK_SOUTH_EAST
return lut_indices.astype(np.int32) | 33 | 63 | import numpy as np
# Thinning morphological operation applied using lookup tables.
# We convert the 3x3 neighbourhood surrounding a pixel to an index
# used to look up the output in a lookup table.
# Bit masks for each neighbour
# 1 2 4
# 8 16 32
# 64 128 256
NEIGH_MASK_EAST = 32
NEIGH_MASK_NORTH_EAST = 4
NEIGH_MASK_NORTH = 2
NEIGH_MASK_NORTH_WEST = 1
NEIGH_MASK_WEST = 8
NEIGH_MASK_SOUTH_WEST = 64
NEIGH_MASK_SOUTH = 128
NEIGH_MASK_SOUTH_EAST = 256
NEIGH_MASK_CENTRE = 16
# Masks in a list
# MASKS[0] = centre
# MASKS[1..8] = start from east, counter-clockwise
MASKS = [NEIGH_MASK_CENTRE,
NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH, NEIGH_MASK_NORTH_WEST,
NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST, NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST,
]
# Constant listing all indices
_LUT_INDS = np.arange(512)
def binary_image_to_lut_indices(x):
"""
Convert a binary image to an index image that can be used with a lookup table
to perform morphological operations. Non-zero elements in the image are interpreted
as 1, zero elements as 0
:param x: a 2D NumPy array.
:return: a 2D NumPy array, same shape as x
"""
if x.ndim != 2:
raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim))
# If the dtype of x is not bool, convert
if x.dtype != bool:  # np.bool was removed from modern NumPy; use the builtin
x = x != 0
# Pad with a one-pixel zero border so every pixel has a full 3x3 neighbourhood
x = np.pad(x, [(1, 1), (1, 1)], mode='constant')
# Convert to LUT indices
lut_indices = x[:-2, :-2] * NEIGH_MASK_NORTH_WEST + \
x[:-2, 1:-1] * NEIGH_MASK_NORTH + \
x[:-2, 2:] * NEIGH_MASK_NORTH_EAST + \
x[1:-1, :-2] * NEIGH_MASK_WEST + \
x[1:-1, 1:-1] * NEIGH_MASK_CENTRE + \
x[1:-1, 2:] * NEIGH_MASK_EAST + \
x[2:, :-2] * NEIGH_MASK_SOUTH_WEST + \
x[2:, 1:-1] * NEIGH_MASK_SOUTH + \
x[2:, 2:] * NEIGH_MASK_SOUTH_EAST
return lut_indices.astype(np.int32)
def apply_lut(x, lut):
"""
Perform a morphological operation on the binary image x using the supplied lookup table
:param x: a binary image as a 2D array
:param lut: a lookup table with 512 entries
:return: the image produced by applying the lookup table
"""
if lut.ndim != 1:
raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim))
if lut.shape[0] != 512:
raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0]))
lut_indices = binary_image_to_lut_indices(x)
return lut[lut_indices]
def identity_lut():
"""
Create an identity lookup table
:return:
"""
lut = np.zeros((512,), dtype=bool)
inds = np.arange(512)
lut[(inds & NEIGH_MASK_CENTRE) != 0] = True
return lut
def _lut_mutate_mask(lut):
"""
Get a mask that shows which neighbourhood shapes result in changes to the image
:param lut: lookup table
:return: mask indicating which lookup indices result in changes
"""
return lut != identity_lut()
def lut_masks_zero(neigh):
"""
Create a LUT index mask for which the specified neighbour is 0
:param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
:return: a LUT index mask
"""
if neigh > 8:
neigh -= 8
return (_LUT_INDS & MASKS[neigh]) == 0
def lut_masks_one(neigh):
"""
Create a LUT index mask for which the specified neighbour is 1
:param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
:return: a LUT index mask
"""
if neigh > 8:
neigh -= 8
return (_LUT_INDS & MASKS[neigh]) != 0
def _thin_cond_g1():
"""
Thinning morphological operation; condition G1
:return: a LUT index mask
"""
b = np.zeros(512, dtype=int)
for i in range(1, 5):
b += lut_masks_zero(2 * i - 1) & (lut_masks_one(2 * i) | lut_masks_one(2 * i + 1))
return b == 1
def _thin_cond_g2():
"""
Thinning morphological operation; condition G2
:return: a LUT index mask
"""
n1 = np.zeros(512, dtype=int)
n2 = np.zeros(512, dtype=int)
for k in range(1, 5):
n1 += (lut_masks_one(2 * k - 1) | lut_masks_one(2 * k))
n2 += (lut_masks_one(2 * k) | lut_masks_one(2 * k + 1))
m = np.minimum(n1, n2)
return (m >= 2) & (m <= 3)
def _thin_cond_g3():
"""
Thinning morphological operation; condition G3
:return: a LUT index mask
"""
return ((lut_masks_one(2) | lut_masks_one(3) | lut_masks_zero(8)) & lut_masks_one(1)) == 0
def _thin_cond_g3_prime():
"""
Thinning morphological operation; condition G3'
:return: a LUT index mask
"""
return ((lut_masks_one(6) | lut_masks_one(7) | lut_masks_zero(4)) & lut_masks_one(5)) == 0
def _thin_iter_1_lut():
"""
Thinning morphological operation; lookup table for iteration 1
:return: lookup table
"""
lut = identity_lut()
cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3()
lut[cond] = False
return lut
def _thin_iter_2_lut():
"""
Thinning morphological operation; lookup table for iteration 2
:return: lookup table
"""
lut = identity_lut()
cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3_prime()
lut[cond] = False
return lut
def binary_thin(x, max_iter=None):
"""
Binary thinning morphological operation
:param x: a binary image, or an image that is to be converted to a binary image
:param max_iter: maximum number of iterations; the default `None` places no limit on the
number of iterations (`binary_thin` terminates automatically once no more changes occur)
:return:
"""
thin1 = _thin_iter_1_lut()
thin2 = _thin_iter_2_lut()
thin1_mut = _lut_mutate_mask(thin1)
thin2_mut = _lut_mutate_mask(thin2)
iter_count = 0
while max_iter is None or iter_count < max_iter:
# Iter 1
lut_indices = binary_image_to_lut_indices(x)
x_mut = thin1_mut[lut_indices]
if x_mut.sum() == 0:
break
x = thin1[lut_indices]
# Iter 2
lut_indices = binary_image_to_lut_indices(x)
x_mut = thin2_mut[lut_indices]
if x_mut.sum() == 0:
break
x = thin2[lut_indices]
iter_count += 1
return x
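# Minimal usage sketch (illustrative, not part of the original module):
#   img = np.zeros((16, 16), dtype=bool)
#   img[4:12, 4:8] = True            # a thick vertical bar
#   skeleton = binary_thin(img)      # iterate until no pixel changes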
|
play | # 1. Create a deck of 52 cards
# 2. Shuffle the deck
# 3. Ask the Player for their bet
# 4. Make sure that the Player's bet does not exceed their available chips
# 5. Deal two cards to the Dealer and two cards to the Player
# 6. Show only one of the Dealer's cards, the other remains hidden
# 7. Show both of the Player's cards
# 8. Ask the Player if they wish to Hit, and take another card
# 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
# 10. If a Player Stands, play the Dealer's hand.
# The dealer will always Hit until the Dealer's value meets or exceeds 17
# 11. Determine the winner and adjust the Player's chips accordingly
# 12. Ask the Player if they'd like to play again | import random
class Card:
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
def __str__(self):
return f"{self.suit} {self.rank}: {BlackJack.values[self.rank]}"
class Hand:
def __init__(self):
self.cards = [] # start with empty list
self.value = 0
self.aces = 0
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
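# Worked example (illustrative): Ace + Ace + Nine starts at 11 + 11 + 9 = 31;
# one ace is demoted (31 - 10 = 21, aces -> 1) and the loop stops at 21.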
def add_card(self, card):
self.cards.append(card)
self.value += BlackJack.values[card.rank]
if card.rank == 'Ace':
self.aces += 1
def __str__(self):
return f"Current Hand:{self.cards}\nCurrent Value:{self.value}\nCurrent Aces:{self.aces}\n"
class Deck:
def __init__(self, card_game):
self.game = card_game
# create deck with all 52 cards
self.cards = list()
for suit in self.game.suits:
for rank in self.game.ranks:
self.cards.append(Card(suit, rank))
def shuffle(self):
random.shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
def __str__(self):
return f"{[x for x in self.cards]}"
class Chips:
def __init__(self, total=100):
self.total = total
self.bet = 0
def win_bet(self):
self.total += self.bet
self.bet = 0
def lose_bet(self):
self.total -= self.bet
self.bet = 0
def make_bet(self, bet):
if bet <= self.total:
self.bet = bet
else:
raise ValueError(f"The bet ({bet}) exceeds available chips ({self.total})")
def __str__(self):
return f"Total: {self.total}\nCurrent Bet:{self.bet}\n"
class Player:
def __init__(self, name):
self.name = name
self.wins = 0
self.lost_games = 0
self.chips = Chips()
def __str__(self):
return f"{self.name}:\n{self.wins} wins\n{self.lost_games} losses\nChips:{self.chips}\n"
class BlackJack:
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,
'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}
def __init__(self, player):
self.player = player
self.deck = Deck(self)
self.playing = False
def greeting(self):
print("WELCOME TO BLACKJACK!")
def take_bet(self):
while True:
try:
# Ask the Player for their bet
bet = int(input("Please put your bet: "))
# Make sure that the Player's bet does not exceed their available chips
self.player.chips.make_bet(bet)
break
except TypeError:
print("Invalid input. Please try again")
except ValueError as exc:
print(f"{exc} Please try again")
def hit(self, hand):
cd = self.deck.deal_card()
# print(f"Deal Card: {cd}")
hand.add_card(cd)
hand.adjust_for_ace()
def hit_or_stand(self, hand):
while True:
print(f"{self.player.name}: current {hand.value}")
action = input("Hit or Stand? Enter 'h' or 's': ")
if action[:1].lower() == 's':  # slicing avoids an IndexError on empty input
print("STAY\n")
self.playing = False
elif action[:1].lower() == 'h':
print("HIT\n")
self.hit(hand)
else:
print(f"Sorry, I do not understand your choice '{action}'. Please try again")
continue
break
def player_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} BUSTED!")
self.player.chips.lose_bet()
self.player.lost_games += 1
def player_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS! ")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS - Dealer BUSTED!")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer WINS")
self.player.chips.lose_bet()
self.player.lost_games += 1
def push(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer and {self.player.name} tie - PUSH!")
def show_some(self, p_hand, d_hand):
# Show only one of the Dealer's cards, the other remains hidden
print(f"Dealer's card (one hidden): {d_hand.cards[0]}")
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
def show_all_cards(self, p_hand, d_hand):
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
# Show all of the Dealer's cards
print("Dealer's Cards:")
for card in d_hand.cards:
print(card)
print(f"total= {d_hand.value}")
# MASKED: play function (lines 190-253)
if __name__ == "__main__":
game_on = True
# Play a new game of BlackJack with Player Daniela
player = Player('Daniela')
game = BlackJack(player)
game.greeting()
while game_on:
game.play()
print(f"GAME DONE.\nGame Stats:\n\n{player}")
# Ask the Player if they'd like to play again
if input("Would you like another game? y/n: ") != 'y':
game_on = False | def play(self):
"""
# 1. Create a deck of 52 cards
# 2. Shuffle the deck
# 3. Ask the Player for their bet
# 4. Make sure that the Player's bet does not exceed their available chips
# 5. Deal two cards to the Dealer and two cards to the Player
# 6. Show only one of the Dealer's cards, the other remains hidden
# 7. Show both of the Player's cards
# 8. Ask the Player if they wish to Hit, and take another card
# 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
# 10. If a Player Stands, play the Dealer's hand.
# The dealer will always Hit until the Dealer's value meets or exceeds 17
# 11. Determine the winner and adjust the Player's chips accordingly
# 12. Ask the Player if they'd like to play again
"""
print("--NEW GAME---")
self.playing = True
self.deck.shuffle()
dealer_hand = Hand()
player_hand = Hand()
# Deal two cards to the Dealer and two cards to the Player
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
self.take_bet()
# show cards, but keep one dealer card hidden
self.show_some(player_hand, dealer_hand)
while self.playing:
# Ask the Player if they wish to Hit, and take another card
# If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
self.hit_or_stand(player_hand)
self.show_some(player_hand, dealer_hand)
if player_hand.value > 21:
# player busts - loses the bet
self.player_busts(player_hand, dealer_hand)
break
# If Player has not busted
if player_hand.value <= 21:
# The dealer will always Hit until the Dealer's value meets or exceeds 17
while dealer_hand.value < 17:
self.hit(dealer_hand)
# Determine the winner - show all cards
self.show_all_cards(player_hand, dealer_hand)
# Determine the winner and adjust the Player's chips accordingly
if dealer_hand.value > 21:
self.dealer_busts(player_hand, dealer_hand)
elif player_hand.value > dealer_hand.value:
self.player_wins(player_hand, dealer_hand)
elif player_hand.value < dealer_hand.value:
self.dealer_wins(player_hand, dealer_hand)
else:
self.push(player_hand, dealer_hand) | 190 | 253 | import random
class Card:
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
def __str__(self):
return f"{self.suit} {self.rank}: {BlackJack.values[self.rank]}"
class Hand:
def __init__(self):
self.cards = [] # start with empty list
self.value = 0
self.aces = 0
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
def add_card(self, card):
self.cards.append(card)
self.value += BlackJack.values[card.rank]
if card.rank == 'Ace':
self.aces += 1
def __str__(self):
return f"Current Hand:{self.cards}\nCurrent Value:{self.value}\nCurrent Aces:{self.aces}\n"
class Deck:
def __init__(self, card_game):
self.game = card_game
# create deck with all 52 cards
self.cards = list()
for suit in self.game.suits:
for rank in self.game.ranks:
self.cards.append(Card(suit, rank))
def shuffle(self):
random.shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
def __str__(self):
return f"{[x for x in self.cards]}"
class Chips:
def __init__(self, total=100):
self.total = total
self.bet = 0
def win_bet(self):
self.total += self.bet
self.bet = 0
def lose_bet(self):
self.total -= self.bet
self.bet = 0
def make_bet(self, bet):
if bet <= self.total:
self.bet = bet
else:
raise ValueError(f"The bet ({bet}) exceeds available chips ({self.total})")
def __str__(self):
return f"Total: {self.total}\nCurrent Bet:{self.bet}\n"
class Player:
def __init__(self, name):
self.name = name
self.wins = 0
self.lost_games = 0
self.chips = Chips()
def __str__(self):
return f"{self.name}:\n{self.wins} wins\n{self.lost_games} losses\nChips:{self.chips}\n"
class BlackJack:
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,
'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}
def __init__(self, player):
self.player = player
self.deck = Deck(self)
self.playing = False
def greeting(self):
print("WELCOME TO BLACKJACK!")
def take_bet(self):
while True:
try:
# Ask the Player for their bet
bet = int(input("Please put your bet: "))
# Make sure that the Player's bet does not exceed their available chips
self.player.chips.make_bet(bet)
break
except TypeError:
print("Invalid input. Please try again")
except ValueError as exc:
print(f"{exc} Please try again")
def hit(self, hand):
cd = self.deck.deal_card()
# print(f"Deal Card: {cd}")
hand.add_card(cd)
hand.adjust_for_ace()
def hit_or_stand(self, hand):
while True:
print(f"{self.player.name}: current {hand.value}")
action = input("Hit or Stand? Enter 'h' or 's': ")
if action[:1].lower() == 's':  # slicing avoids an IndexError on empty input
print("STAY\n")
self.playing = False
elif action[:1].lower() == 'h':
print("HIT\n")
self.hit(hand)
else:
print(f"Sorry, I do not understand your choice '{action}'. Please try again")
continue
break
def player_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} BUSTED!")
self.player.chips.lose_bet()
self.player.lost_games += 1
def player_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS! ")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS - Dealer BUSTED!")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer WINS")
self.player.chips.lose_bet()
self.player.lost_games += 1
def push(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer and {self.player.name} tie - PUSH!")
def show_some(self, p_hand, d_hand):
# Show only one of the Dealer's cards, the other remains hidden
print(f"Dealer's card (one hidden): {d_hand.cards[0]}")
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
def show_all_cards(self, p_hand, d_hand):
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
# Show all of the Dealer's cards
print("Dealer's Cards:")
for card in d_hand.cards:
print(card)
print(f"total= {d_hand.value}")
def play(self):
"""
# 1. Create a deck of 52 cards
# 2. Shuffle the deck
# 3. Ask the Player for their bet
# 4. Make sure that the Player's bet does not exceed their available chips
# 5. Deal two cards to the Dealer and two cards to the Player
# 6. Show only one of the Dealer's cards, the other remains hidden
# 7. Show both of the Player's cards
# 8. Ask the Player if they wish to Hit, and take another card
# 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
# 10. If a Player Stands, play the Dealer's hand.
# The dealer will always Hit until the Dealer's value meets or exceeds 17
# 11. Determine the winner and adjust the Player's chips accordingly
# 12. Ask the Player if they'd like to play again
"""
print("--NEW GAME---")
self.playing = True
self.deck.shuffle()
dealer_hand = Hand()
player_hand = Hand()
# Deal two cards to the Dealer and two cards to the Player
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
self.take_bet()
# show cards, but keep one dealer card hidden
self.show_some(player_hand, dealer_hand)
while self.playing:
# Ask the Player if they wish to Hit, and take another card
# If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
self.hit_or_stand(player_hand)
self.show_some(player_hand, dealer_hand)
if player_hand.value > 21:
# player busts - loses the bet
self.player_busts(player_hand, dealer_hand)
break
# If Player has not busted
if player_hand.value <= 21:
# The dealer will always Hit until the Dealer's value meets or exceeds 17
while dealer_hand.value < 17:
self.hit(dealer_hand)
# Determine the winner - show all cards
self.show_all_cards(player_hand, dealer_hand)
# Determine the winner and adjust the Player's chips accordingly
if dealer_hand.value > 21:
self.dealer_busts(player_hand, dealer_hand)
elif player_hand.value > dealer_hand.value:
self.player_wins(player_hand, dealer_hand)
elif player_hand.value < dealer_hand.value:
self.dealer_wins(player_hand, dealer_hand)
else:
self.push(player_hand, dealer_hand)
if __name__ == "__main__":
game_on = True
# Play a new game of BlackJack with Player Daniela
player = Player('Daniela')
game = BlackJack(player)
game.greeting()
while game_on:
game.play()
print(f"GAME DONE.\nGame Stats:\n\n{player}")
# Ask the Player if they'd like to play again
if input("Would you like another game? y/n: ") != 'y':
game_on = False
|
__init__ | NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
# MASKED: __init__ function (lines 17-72)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts) | 17 | 72 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
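# Usage sketch (illustrative; the resource ID shown is hypothetical):
#   existing = Account.get(
#       "imported-account",
#       id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.NetApp/netAppAccounts/<name>",
#   )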
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
get | Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource. | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts)
# MASKED: get function (lines 74-96)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| @staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__) | 74 | 96 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
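
# A minimal usage sketch, not part of the generated SDK; the resource,
# group and account names below are illustrative assumptions. Pulumi
# resources only materialize when the module runs under `pulumi up`, so
# the example is left as comments rather than executable module code:
#
#   import pulumi
#
#   account = Account("exampleAccount",
#                     resource_group_name="example-rg",
#                     account_name="examplenetappacct",
#                     location="eastus")
#   pulumi.export("accountName", account.name)
#
#   # Looking up an existing account by provider ID (placeholder ID):
#   existing = Account.get("existing", id="/subscriptions/.../netAppAccounts/acct")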
|
make_layer | Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6. | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2], ' \
            f'but received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        deep_stem (bool): Whether to add an extra 3x3 stride-1 stem
            convolution (3 -> 16 channels) before the standard stem conv.
            Default: False.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
deep_stem=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, 8), but received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
if deep_stem:
            self.conv0 = ConvModule(
                in_channels=3,
                out_channels=16,
                kernel_size=3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
in_channels_ = 16
else:
in_channels_ = 3
self.conv0 = nn.Sequential()
self.conv1 = ConvModule(
in_channels=in_channels_,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
# MASKED: make_layer function (lines 214-240)
def forward(self, x):
x = self.conv0(x)
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() | def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers) | 214 | 240 | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2], ' \
            f'but received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        deep_stem (bool): Whether to add an extra 3x3 stride-1 stem
            convolution (3 -> 16 channels) before the standard stem conv.
            Default: False.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
deep_stem=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, 8), but received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
if deep_stem:
            self.conv0 = ConvModule(
                in_channels=3,
                out_channels=16,
                kernel_size=3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
in_channels_ = 16
else:
in_channels_ = 3
self.conv0 = nn.Sequential()
self.conv1 = ConvModule(
in_channels=in_channels_,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0(x)
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
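
# A minimal smoke-test sketch (assumed 224x224 input; requires torch, mmcv
# and mmcls). It builds the backbone and prints the shape of each requested
# output stage:
if __name__ == '__main__':
    import torch

    model = MobileNetV2(widen_factor=1.0, out_indices=(2, 4, 7))
    model.eval()
    dummy = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        feats = model(dummy)
    for feat in feats:
        print(tuple(feat.shape))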
|
dlrn_http_factory | Create a DlrnHttpData instance based on a host.
:param host: A host name string used to build the instance
:param config_file: A dlrn config file (or a list of files) to use in
addition to the default.
:param link_name: A dlrn symlink to use. This overrides the config
file's link parameter.
:param logger: An atkinson logger to use. Default is the base logger.
:return: A DlrnHttpData instance
"""Functions for working with the DLRN API"""
import csv
import os.path
import requests
from toolchest import yaml
from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger
def _raw_fetch(url, logger):
"""
Fetch remote data and return the text output.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Raw text data, None otherwise
"""
ret_data = None
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
ret_data = req.text
except requests.exceptions.ConnectionError as error:
logger.warning(error.request)
return ret_data
def _fetch_yaml(url, logger):
"""
Fetch remote data and process the text as yaml.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Parsed yaml data in the form of a dictionary
"""
ret_data = None
raw_data = _raw_fetch(url, logger)
if raw_data is not None:
ret_data = yaml.parse(raw_data)
return ret_data
# MASKED: dlrn_http_factory function (lines 49-87)
class DlrnHttpData():
"""A class used to interact with the dlrn API"""
def __init__(self, url, release, link_name='current', logger=getLogger()):
"""
Class constructor
:param url: The URL to the host to obtain data.
        :param release: The release name to use for lookup.
:param link_name: The name of the dlrn symlink to fetch data from.
:param logger: An atkinson logger to use. Default is the base logger.
"""
self.url = os.path.join(url, release)
self.release = release
self._logger = logger
self._link_name = link_name
self._commit_data = {}
self._fetch_commit()
def _fetch_commit(self):
"""
Fetch the commit data from dlrn
"""
full_url = os.path.join(self.url,
self._link_name,
'commit.yaml')
data = _fetch_yaml(full_url, self._logger)
if data is not None and 'commits' in data:
pkg = data['commits'][0]
if pkg['status'] == 'SUCCESS':
self._commit_data = {'name': pkg['project_name'],
'dist_hash': pkg['distro_hash'],
'commit_hash': pkg['commit_hash'],
'extended_hash': pkg.get('extended_hash')}
else:
                msg = '{0} has a status of {1}, not SUCCESS'.format(
                    pkg['project_name'], pkg['status'])
self._logger.warning(msg)
def _build_url(self):
"""
Generate a url given a commit hash and distgit hash to match the format
base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987
is a portion of the distgit hash.
:return: A string with the full URL.
"""
first = self._commit_data['commit_hash'][0:2]
second = self._commit_data['commit_hash'][2:4]
third = self._commit_data['commit_hash']
for key in ['dist_hash', 'extended_hash']:
if self._commit_data.get(key, 'None') != 'None':
third += '_' + self._commit_data[key][0:8]
return os.path.join(self.url,
first,
second,
third)
@property
def commit(self):
"""
Get the dlrn commit information
:return: A dictionary of name, dist-git hash, commit hash and
extended hash.
An empty dictionary is returned otherwise.
"""
return self._commit_data
@property
def versions(self):
"""
Get the version data for the versions.csv file and return the
data in a dictionary
:return: A dictionary of packages with commit and dist-git hashes
"""
ret_dict = {}
full_url = os.path.join(self._build_url(), 'versions.csv')
data = _raw_fetch(full_url, self._logger)
if data is not None:
data = data.replace(' ', '_')
split_data = data.split()
reader = csv.DictReader(split_data)
for row in reader:
ret_dict[row['Project']] = {'source': row['Source_Sha'],
'state': row['Status'],
'distgit': row['Dist_Sha'],
'nvr': row['Pkg_NVR']}
else:
msg = 'Could not fetch {0}'.format(full_url)
self._logger.error(msg)
return ret_dict | def dlrn_http_factory(host, config_file=None, link_name=None,
logger=getLogger()):
"""
    Create a DlrnHttpData instance based on a host.
    :param host: A host name string used to build the instance
    :param config_file: A dlrn config file (or a list of files) to use in
                        addition to the default.
    :param link_name: A dlrn symlink to use. This overrides the config
                      file's link parameter.
    :param logger: An atkinson logger to use. Default is the base logger.
    :return: A DlrnHttpData instance
"""
manager = None
files = ['dlrn.yml']
if config_file is not None:
if isinstance(config_file, list):
files.extend(config_file)
else:
files.append(config_file)
local_path = os.path.realpath(os.path.dirname(__file__))
manager = ConfigManager(filenames=files, paths=local_path)
if manager is None:
return None
config = manager.config
if host not in config:
return None
link = config[host]['link']
if link_name is not None:
link = link_name
return DlrnHttpData(config[host]['url'],
config[host]['release'],
link_name=link,
logger=logger) | 49 | 87 | #! /usr/bin/env python
"""Functions for working with the DLRN API"""
import csv
import os.path
import requests
from toolchest import yaml
from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger
def _raw_fetch(url, logger):
"""
Fetch remote data and return the text output.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Raw text data, None otherwise
"""
ret_data = None
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
ret_data = req.text
except requests.exceptions.ConnectionError as error:
logger.warning(error.request)
return ret_data
def _fetch_yaml(url, logger):
"""
Fetch remote data and process the text as yaml.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Parsed yaml data in the form of a dictionary
"""
ret_data = None
raw_data = _raw_fetch(url, logger)
if raw_data is not None:
ret_data = yaml.parse(raw_data)
return ret_data
def dlrn_http_factory(host, config_file=None, link_name=None,
logger=getLogger()):
"""
    Create a DlrnHttpData instance based on a host.
    :param host: A host name string used to build the instance
    :param config_file: A dlrn config file (or a list of files) to use in
                        addition to the default.
    :param link_name: A dlrn symlink to use. This overrides the config
                      file's link parameter.
    :param logger: An atkinson logger to use. Default is the base logger.
    :return: A DlrnHttpData instance
"""
manager = None
files = ['dlrn.yml']
if config_file is not None:
if isinstance(config_file, list):
files.extend(config_file)
else:
files.append(config_file)
local_path = os.path.realpath(os.path.dirname(__file__))
manager = ConfigManager(filenames=files, paths=local_path)
if manager is None:
return None
config = manager.config
if host not in config:
return None
link = config[host]['link']
if link_name is not None:
link = link_name
return DlrnHttpData(config[host]['url'],
config[host]['release'],
link_name=link,
logger=logger)
class DlrnHttpData():
"""A class used to interact with the dlrn API"""
def __init__(self, url, release, link_name='current', logger=getLogger()):
"""
Class constructor
:param url: The URL to the host to obtain data.
        :param release: The release name to use for lookup.
:param link_name: The name of the dlrn symlink to fetch data from.
:param logger: An atkinson logger to use. Default is the base logger.
"""
self.url = os.path.join(url, release)
self.release = release
self._logger = logger
self._link_name = link_name
self._commit_data = {}
self._fetch_commit()
def _fetch_commit(self):
"""
Fetch the commit data from dlrn
"""
full_url = os.path.join(self.url,
self._link_name,
'commit.yaml')
data = _fetch_yaml(full_url, self._logger)
if data is not None and 'commits' in data:
pkg = data['commits'][0]
if pkg['status'] == 'SUCCESS':
self._commit_data = {'name': pkg['project_name'],
'dist_hash': pkg['distro_hash'],
'commit_hash': pkg['commit_hash'],
'extended_hash': pkg.get('extended_hash')}
else:
                msg = '{0} has a status of {1}, not SUCCESS'.format(
                    pkg['project_name'], pkg['status'])
self._logger.warning(msg)
def _build_url(self):
"""
Generate a url given a commit hash and distgit hash to match the format
base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987
is a portion of the distgit hash.
:return: A string with the full URL.
"""
first = self._commit_data['commit_hash'][0:2]
second = self._commit_data['commit_hash'][2:4]
third = self._commit_data['commit_hash']
for key in ['dist_hash', 'extended_hash']:
if self._commit_data.get(key, 'None') != 'None':
third += '_' + self._commit_data[key][0:8]
return os.path.join(self.url,
first,
second,
third)
@property
def commit(self):
"""
Get the dlrn commit information
:return: A dictionary of name, dist-git hash, commit hash and
extended hash.
An empty dictionary is returned otherwise.
"""
return self._commit_data
@property
def versions(self):
"""
Get the version data for the versions.csv file and return the
data in a dictionary
:return: A dictionary of packages with commit and dist-git hashes
"""
ret_dict = {}
full_url = os.path.join(self._build_url(), 'versions.csv')
data = _raw_fetch(full_url, self._logger)
if data is not None:
data = data.replace(' ', '_')
split_data = data.split()
reader = csv.DictReader(split_data)
for row in reader:
ret_dict[row['Project']] = {'source': row['Source_Sha'],
'state': row['Status'],
'distgit': row['Dist_Sha'],
'nvr': row['Pkg_NVR']}
else:
msg = 'Could not fetch {0}'.format(full_url)
self._logger.error(msg)
return ret_dict
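
# A minimal usage sketch; the 'rdo' host key is an assumption and must be
# defined in a dlrn.yml config file for the factory to return an instance:
if __name__ == '__main__':
    dlrn = dlrn_http_factory('rdo')
    if dlrn is not None:
        print(dlrn.commit)
        for project, info in dlrn.versions.items():
            print(project, info['nvr'])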
|
to_shapefile | Export stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
filename : str
Shapefile name to write
kper : int
MODFLOW zero-based stress period number to return. (default is None)
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.to_shapefile('test_hk.shp', kper=1) | """
util_list module. Contains the mflist class.
This class encapsulates modflow-style list inputs away
from the individual packages. The end-user should not need to
instantiate this class directly.
"""
from __future__ import division, print_function
import os
import warnings
import numpy as np
from ..datbase import DataInterface, DataListInterface, DataType
from ..utils.recarray_utils import create_empty_recarray
try:
from numpy.lib import NumpyVersion
numpy114 = NumpyVersion(np.__version__) >= "1.14.0"
except ImportError:
numpy114 = False
class MfList(DataInterface, DataListInterface):
"""
a generic object for handling transient boundary condition lists
Parameters
----------
package : package object
The package object (of type :class:`flopy.pakbase.Package`) to which
this MfList will be added.
data : varies
the data of the transient list (optional). (the default is None)
Attributes
----------
mxact : int
the max number of active bc for any stress period
Methods
-------
add_record(kper,index,value) : None
add a record to stress period kper at index location
write_transient(f) : None
write the transient sequence to the model input file f
check_kij() : None
checks for boundaries outside of model domain - issues warnings only
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(
self,
package,
data=None,
dtype=None,
model=None,
list_free_format=None,
binary=False,
):
if isinstance(data, MfList):
for attr in data.__dict__.items():
setattr(self, attr[0], attr[1])
if model is None:
self._model = package.parent
else:
self._model = model
self._package = package
return
self._package = package
if model is None:
self._model = package.parent
else:
self._model = model
if dtype is None:
assert isinstance(self.package.dtype, np.dtype)
self.__dtype = self.package.dtype
else:
self.__dtype = dtype
self.__binary = binary
self.__vtype = {}
self.__data = {}
if data is not None:
self.__cast_data(data)
self.__df = None
if list_free_format is None:
if package.parent.version == "mf2k":
list_free_format = False
self.list_free_format = list_free_format
return
@property
def name(self):
return self.package.name
@property
def mg(self):
return self._model.modelgrid
@property
def sr(self):
return self.mg.sr
@property
def model(self):
return self._model
@property
def package(self):
return self._package
@property
def data_type(self):
return DataType.transientlist
@property
def plotable(self):
return True
def get_empty(self, ncell=0):
d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)
return d
def export(self, f, **kwargs):
from flopy import export
return export.utils.mflist_export(f, self, **kwargs)
def append(self, other):
""" append the recarrays from one MfList to another
Parameters
----------
other: variable: an item that can be cast in to an MfList
that corresponds with self
Returns
-------
dict of {kper:recarray}
"""
if not isinstance(other, MfList):
other = MfList(
self.package,
data=other,
dtype=self.dtype,
model=self._model,
list_free_format=self.list_free_format,
)
msg = (
"MfList.append(): other arg must be "
+ "MfList or dict, not {0}".format(type(other))
)
assert isinstance(other, MfList), msg
other_kpers = list(other.data.keys())
other_kpers.sort()
self_kpers = list(self.data.keys())
self_kpers.sort()
new_dict = {}
for kper in range(self._model.nper):
other_data = other[kper].copy()
self_data = self[kper].copy()
other_len = other_data.shape[0]
self_len = self_data.shape[0]
if (other_len == 0 and self_len == 0) or (
kper not in self_kpers and kper not in other_kpers
):
continue
elif self_len == 0:
new_dict[kper] = other_data
elif other_len == 0:
new_dict[kper] = self_data
else:
new_len = other_data.shape[0] + self_data.shape[0]
new_data = np.recarray(new_len, dtype=self.dtype)
new_data[:self_len] = self_data
new_data[self_len : self_len + other_len] = other_data
new_dict[kper] = new_data
return new_dict
def drop(self, fields):
"""drop fields from an MfList
Parameters
----------
fields : list or set of field names to drop
Returns
-------
dropped : MfList without the dropped fields
"""
if not isinstance(fields, list):
fields = [fields]
names = [n for n in self.dtype.names if n not in fields]
dtype = np.dtype(
[(k, d) for k, d in self.dtype.descr if k not in fields]
)
spd = {}
for k, v in self.data.items():
# because np 1.9 doesn't support indexing by list of columns
newarr = np.array([self.data[k][n] for n in names]).transpose()
newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
np.recarray
)
for n in dtype.names:
newarr[n] = self.data[k][n]
spd[k] = newarr
return MfList(self.package, spd, dtype=dtype)
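    # Example (hypothetical auxiliary field name): drop an 'iface' column
    # from a WEL package's stress period data:
    #   spd = ml.wel.stress_period_data.drop(['iface'])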
@property
def data(self):
return self.__data
@property
def df(self):
if self.__df is None:
self.__df = self.get_dataframe()
return self.__df
@property
def vtype(self):
return self.__vtype
@property
def dtype(self):
return self.__dtype
# Get the itmp for a given kper
def get_itmp(self, kper):
if kper not in list(self.__data.keys()):
return None
if self.__vtype[kper] is None:
return -1
# If an external file, have to load it
if self.__vtype[kper] == str:
return self.__fromfile(self.__data[kper]).shape[0]
if self.__vtype[kper] == np.recarray:
return self.__data[kper].shape[0]
# If not any of the above, it must be an int
return self.__data[kper]
@property
def mxact(self):
mxact = 0
for kper in list(self.__data.keys()):
mxact = max(mxact, self.get_itmp(kper))
return mxact
@property
def fmt_string(self):
"""Returns a C-style fmt string for numpy savetxt that corresponds to
the dtype"""
if self.list_free_format is not None:
use_free = self.list_free_format
else:
use_free = True
if self.package.parent.has_package("bas6"):
use_free = self.package.parent.bas6.ifrefm
# mt3d list data is fixed format
if "mt3d" in self.package.parent.version.lower():
use_free = False
fmts = []
for field in self.dtype.descr:
vtype = field[1][1].lower()
if vtype in ("i", "b"):
if use_free:
fmts.append("%9d")
else:
fmts.append("%10d")
elif vtype == "f":
if use_free:
if numpy114:
# Use numpy's floating-point formatter (Dragon4)
fmts.append("%15s")
else:
fmts.append("%15.7E")
else:
fmts.append("%10G")
elif vtype == "o":
if use_free:
fmts.append("%9s")
else:
fmts.append("%10s")
elif vtype == "s":
msg = (
"MfList.fmt_string error: 'str' type found in dtype. "
"This gives unpredictable results when "
"recarray to file - change to 'object' type"
)
raise TypeError(msg)
else:
raise TypeError(
"MfList.fmt_string error: unknown vtype in "
"field: {}".format(field)
)
if use_free:
fmt_string = " " + " ".join(fmts)
else:
fmt_string = "".join(fmts)
return fmt_string
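    # For example, with free format and a dtype of
    # [('k', int), ('i', int), ('j', int), ('flux', float)], this yields
    # " %9d %9d %9d %15s" on numpy >= 1.14 (" %9d %9d %9d %15.7E" otherwise).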
# Private method to cast the data argument
# Should only be called by the constructor
def __cast_data(self, data):
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# If data is a dict, the we have to assume it is keyed on kper
if isinstance(data, dict):
if not list(data.keys()):
raise Exception("MfList error: data dict is empty")
for kper, d in data.items():
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: data dict key "
+ "{0:s} not integer: ".format(kper)
+ str(type(kper))
+ "\n"
+ str(e)
)
# Same as before, just try...
if isinstance(d, list):
# warnings.warn("MfList: casting list to array at " +\
# "kper {0:d}".format(kper))
try:
d = np.array(d)
except Exception as e:
raise Exception(
"MfList error: casting list "
+ "to ndarray: "
+ str(e)
)
# super hack - sick of recarrays already
# if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1):
# d = d.view(np.recarray)
if isinstance(d, np.recarray):
self.__cast_recarray(kper, d)
elif isinstance(d, np.ndarray):
self.__cast_ndarray(kper, d)
elif isinstance(d, int):
self.__cast_int(kper, d)
elif isinstance(d, str):
self.__cast_str(kper, d)
elif d is None:
self.__data[kper] = -1
self.__vtype[kper] = None
else:
raise Exception(
"MfList error: unsupported data type: "
+ str(type(d))
+ " at kper "
+ "{0:d}".format(kper)
)
# A single recarray - same MfList for all stress periods
elif isinstance(data, np.recarray):
self.__cast_recarray(0, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(0, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(0, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
def __cast_str(self, kper, d):
# If d is a string, assume it is a filename and check that it exists
assert os.path.exists(d), (
"MfList error: dict filename (string) '"
+ d
+ "' value for "
+ "kper {0:d} not found".format(kper)
)
self.__data[kper] = d
self.__vtype[kper] = str
def __cast_int(self, kper, d):
# If d is an integer, then it must be 0 or -1
if d > 0:
raise Exception(
"MfList error: dict integer value for "
"kper {0:10d} must be 0 or -1, "
"not {1:10d}".format(kper, d)
)
if d == 0:
self.__data[kper] = 0
self.__vtype[kper] = None
else:
self.__data[kper] = -1
self.__vtype[kper] = None
def __cast_recarray(self, kper, d):
assert d.dtype == self.__dtype, (
"MfList error: recarray dtype: "
+ str(d.dtype)
+ " doesn't match "
+ "self dtype: "
+ str(self.dtype)
)
self.__data[kper] = d
self.__vtype[kper] = np.recarray
def __cast_ndarray(self, kper, d):
d = np.atleast_2d(d)
if d.dtype != self.__dtype:
assert d.shape[1] == len(self.dtype), (
"MfList error: ndarray "
+ "shape "
+ str(d.shape)
+ " doesn't match dtype "
+ "len: "
+ str(len(self.dtype))
)
# warnings.warn("MfList: ndarray dtype does not match self " +\
# "dtype, trying to cast")
try:
self.__data[kper] = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype
)
except Exception as e:
raise Exception(
"MfList error: casting ndarray to recarray: " + str(e)
)
self.__vtype[kper] = np.recarray
def get_dataframe(self, squeeze=True):
"""
Cast recarrays for stress periods into single
dataframe containing all stress periods.
Parameters
----------
squeeze : bool
Reduce number of columns in dataframe to only include
stress periods where a variable changes.
Returns
-------
df : dataframe
Dataframe of shape nrow = ncells, ncol = nvar x nper. If
the squeeze option is chosen, nper is the number of
stress periods where at least one cells is different,
otherwise it is equal to the number of keys in MfList.data.
Notes
-----
Requires pandas.
"""
try:
import pandas as pd
        except ImportError:
msg = "MfList.get_dataframe() requires pandas"
raise ImportError(msg)
# make a dataframe of all data for all stress periods
names = ["k", "i", "j"]
if "MNW2" in self.package.name:
names += ["wellid"]
# find relevant variable names
# may have to iterate over the first stress period
for per in range(self._model.nper):
if hasattr(self.data[per], "dtype"):
varnames = list(
[n for n in self.data[per].dtype.names if n not in names]
)
break
# create list of dataframes for each stress period
# each with index of k, i, j
dfs = []
for per in self.data.keys():
recs = self.data[per]
if recs is None or len(recs) == 0:
# add an empty dataframe if a stress period is
# empty (e.g. no pumping during a predevelopment
# period)
columns = names + list(
["{}{}".format(c, per) for c in varnames]
)
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename("n")
if (count > 1).values.any():
print(
"Duplicated list entry locations aggregated "
"for kper {}".format(per)
)
for kij in count[count > 1].index.values:
print(" (k,i,j) {}".format(kij))
dfi = dfg.sum() # aggregate
dfi.columns = list(["{}{}".format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if var in n])
diff = df[diffcols].fillna(0).diff(axis=1)
diff[
"{}0".format(var)
] = 1 # always return the first stress period
changed = diff.sum(axis=0) != 0
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), "node", df.i * self._model.ncol + df.j)
return df
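    # A minimal usage sketch (assumed model name, for illustration):
    #   ml = flopy.modflow.Modflow.load('test.nam')
    #   df = ml.wel.stress_period_data.get_dataframe(squeeze=True)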
def add_record(self, kper, index, values):
# Add a record to possible already set list for a given kper
# index is a list of k,i,j or nodes.
# values is a list of floats.
# The length of index + values must be equal to the number of names
# in dtype
assert len(index) + len(values) == len(self.dtype), (
"MfList.add_record() error: length of index arg +"
+ "length of value arg != length of self dtype"
)
# If we already have something for this kper, then add to it
if kper in list(self.__data.keys()):
if self.vtype[kper] == int:
# If a 0 or -1, reset
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == str:
# If filename, load into recarray
d = self.__fromfile(self.data[kper])
d.resize(d.shape[0], d.shape[1])
self.__data[kper] = d
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == np.recarray:
# Extend the recarray
self.__data[kper] = np.append(
self.__data[kper], self.get_empty(1)
)
else:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
rec = list(index)
rec.extend(list(values))
try:
self.__data[kper][-1] = tuple(rec)
except Exception as e:
raise Exception(
"MfList.add_record() error: adding record to "
+ "recarray: "
+ str(e)
)
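    # Example (hypothetical WEL-style dtype of k, i, j, flux): append one
    # well to stress period 0:
    #   ml.wel.stress_period_data.add_record(kper=0, index=[0, 4, 7],
    #                                        values=[-100.0])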
def __getitem__(self, kper):
# Get the recarray for a given kper
# If the data entry for kper is a string,
# return the corresponding recarray,
# but don't reset the value in the data dict
# assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \
# str(kper) + " not in data.keys()"
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: _getitem__() passed invalid kper index:"
+ str(kper)
)
if kper not in list(self.data.keys()):
if kper == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == int:
if self.data[kper] == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == str:
return self.__fromfile(self.data[kper])
if self.vtype[kper] == np.recarray:
return self.data[kper]
def __setitem__(self, kper, data):
if kper in list(self.__data.keys()):
if self._model.verbose:
print("removing existing data for kper={}".format(kper))
self.data.pop(kper)
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# cast data
if isinstance(data, int):
self.__cast_int(kper, data)
elif isinstance(data, np.recarray):
self.__cast_recarray(kper, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(kper, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(kper, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
# raise NotImplementedError("MfList.__setitem__() not implemented")
def __fromfile(self, f):
# d = np.fromfile(f,dtype=self.dtype,count=count)
try:
d = np.genfromtxt(f, dtype=self.dtype)
except Exception as e:
raise Exception(
"MfList.__fromfile() error reading recarray "
+ "from file "
+ str(e)
)
return d
def get_filenames(self):
kpers = list(self.data.keys())
kpers.sort()
filenames = []
first = kpers[0]
for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_vtype = self.__vtype[kper]
if (
self._model.array_free_format
and self._model.external_path is not None
):
# py_filepath = ''
# py_filepath = os.path.join(py_filepath,
# self._model.external_path)
filename = self.package.name[0] + "_{0:04d}.dat".format(kper)
filenames.append(filename)
return filenames
def get_filename(self, kper):
ext = "dat"
if self.binary:
ext = "bin"
return self.package.name[0] + "_{0:04d}.{1}".format(kper, ext)
@property
def binary(self):
return bool(self.__binary)
def write_transient(self, f, single_per=None, forceInternal=False):
# forceInternal overrides isExternal (set below) for cases where
# external arrays are not supported (oh hello MNW1!)
# write the transient sequence described by the data dict
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
assert hasattr(f, "read"), (
"MfList.write() error: " + "f argument must be a file handle"
)
kpers = list(self.data.keys())
kpers.sort()
first = kpers[0]
if single_per is None:
loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))
else:
if not isinstance(single_per, list):
single_per = [single_per]
loop_over_kpers = single_per
for kper in loop_over_kpers:
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_data = self.__data[kper]
kper_vtype = self.__vtype[kper]
if kper_vtype == str:
if not self._model.array_free_format:
kper_data = self.__fromfile(kper_data)
kper_vtype = np.recarray
itmp = self.get_itmp(kper)
if kper_vtype == np.recarray:
itmp = kper_data.shape[0]
elif (kper_vtype == int) or (kper_vtype is None):
itmp = kper_data
# Fill late missing kpers with -1
else:
itmp = -1
kper_vtype = int
f.write(
" {0:9d} {1:9d} # stress period {2:d}\n".format(
itmp, 0, kper + 1
)
)
isExternal = False
if (
self._model.array_free_format
and self._model.external_path is not None
and forceInternal is False
):
isExternal = True
if self.__binary:
isExternal = True
if isExternal:
if kper_vtype == np.recarray:
py_filepath = ""
if self._model.model_ws is not None:
py_filepath = self._model.model_ws
if self._model.external_path is not None:
py_filepath = os.path.join(
py_filepath, self._model.external_path
)
filename = self.get_filename(kper)
py_filepath = os.path.join(py_filepath, filename)
model_filepath = filename
if self._model.external_path is not None:
model_filepath = os.path.join(
self._model.external_path, filename
)
self.__tofile(py_filepath, kper_data)
kper_vtype = str
kper_data = model_filepath
if kper_vtype == np.recarray:
name = f.name
if self.__binary or not numpy114:
f.close()
# switch file append mode to binary
with open(name, "ab+") as f:
self.__tofile(f, kper_data)
# continue back to non-binary
f = open(name, "a")
else:
self.__tofile(f, kper_data)
elif kper_vtype == str:
f.write(" open/close " + kper_data)
if self.__binary:
f.write(" (BINARY)")
f.write("\n")
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
assert isinstance(data, np.recarray), (
"MfList.__tofile() data arg " + "not a recarray"
)
# Add one to the kij indices
lnames = [name.lower() for name in self.dtype.names]
# --make copy of data for multiple calls
d = data.copy()
for idx in ["k", "i", "j", "node"]:
if idx in lnames:
d[idx] += 1
if self.__binary:
dtype2 = []
for name in self.dtype.names:
dtype2.append((name, np.float32))
dtype2 = np.dtype(dtype2)
d = np.array(d, dtype=dtype2)
d.tofile(f)
else:
np.savetxt(f, d, fmt=self.fmt_string, delimiter="")
def check_kij(self):
names = self.dtype.names
if ("k" not in names) or ("i" not in names) or ("j" not in names):
warnings.warn(
"MfList.check_kij(): index fieldnames 'k,i,j' "
+ "not found in self.dtype names: "
+ str(names)
)
return
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
if nl == 0:
warnings.warn(
"MfList.check_kij(): unable to get dis info from " + "model"
)
return
for kper in list(self.data.keys()):
out_idx = []
data = self[kper]
if data is not None:
k = data["k"]
k_idx = np.where(np.logical_or(k < 0, k >= nl))
if k_idx[0].shape[0] > 0:
out_idx.extend(list(k_idx[0]))
i = data["i"]
i_idx = np.where(np.logical_or(i < 0, i >= nr))
if i_idx[0].shape[0] > 0:
out_idx.extend(list(i_idx[0]))
j = data["j"]
j_idx = np.where(np.logical_or(j < 0, j >= nc))
if j_idx[0].shape[0]:
out_idx.extend(list(j_idx[0]))
if len(out_idx) > 0:
warn_str = (
"MfList.check_kij(): warning the following "
+ "indices are out of bounds in kper "
+ str(kper)
+ ":\n"
)
for idx in out_idx:
d = data[idx]
warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
d["k"] + 1, d["i"] + 1, d["j"] + 1
)
warnings.warn(warn_str)
def __find_last_kper(self, kper):
kpers = list(self.data.keys())
kpers.sort()
last = 0
for kkper in kpers[::-1]:
# if this entry is valid
if self.vtype[kkper] != int or self.data[kkper] != -1:
last = kkper
if kkper <= kper:
break
return kkper
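# Behavioral sketch of the backward search above for the common case
# (a hypothetical helper, assuming at least one kper holds valid data):
def _example_find_last(valid_kpers, kper):
    earlier = [k for k in valid_kpers if k <= kper]
    return max(earlier) if earlier else min(valid_kpers)
# _example_find_last([0, 3], 4) -> 3  (kper 4 reuses the kper 3 records)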
def get_indices(self):
"""
a helper function for plotting - get all unique indices
"""
names = self.dtype.names
lnames = [name.lower() for name in names]
if "k" not in lnames or "j" not in lnames:
raise NotImplementedError("MfList.get_indices requires kij")
kpers = list(self.data.keys())
kpers.sort()
indices = []
for i, kper in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if (kper_vtype != int) and (kper_vtype is not None):
d = self.data[kper]
if not indices:
indices = list(zip(d["k"], d["i"], d["j"]))
else:
new_indices = list(zip(d["k"], d["i"], d["j"]))
for ni in new_indices:
if ni not in indices:
indices.append(ni)
return indices
def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
assert attr in self.dtype.names
if idx_val is not None:
assert idx_val[0] in self.dtype.names
kpers = list(self.data.keys())
kpers.sort()
values = []
for kper in range(0, max(self._model.nper, max(kpers))):
if kper < min(kpers):
values.append(0)
elif kper > max(kpers) or kper not in kpers:
values.append(values[-1])
else:
kper_data = self.__data[kper]
if idx_val is not None:
kper_data = kper_data[
np.where(kper_data[idx_val[0]] == idx_val[1])
]
# kper_vtype = self.__vtype[kper]
v = function(kper_data[attr])
values.append(v)
return values
def plot(
self,
key=None,
names=None,
kper=0,
filename_base=None,
file_extension=None,
mflay=None,
**kwargs
):
"""
Plot stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
key : str
MfList dictionary key. (default is None)
names : list
List of names for figure titles. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
**kwargs : dict
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
inactive : bool
Boolean used to determine if a black overlay in inactive
cells in a layer will be displayed. (default is True)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
Returns
----------
out : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.stress_period_data.plot(ml.wel, kper=1)
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(
self,
key=key,
names=names,
kper=kper,
filename_base=filename_base,
file_extension=file_extension,
mflay=mflay,
**kwargs
)
return axes
# MASKED: to_shapefile function (lines 1010-1062)
def to_array(self, kper=0, mask=False):
"""
Convert stress period boundary condition (MfList) data for a
specified stress period to a 3-D numpy array
Parameters
----------
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
mask : boolean
return array with np.NaN instead of zero
Returns
----------
out : dict of numpy.ndarrays
Dictionary of 3-D numpy arrays containing the stress period data for
a selected stress period. The dictionary keys are the MfList dtype
names for the stress period data ('cond', 'flux', 'bhead', etc.).
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> v = ml.wel.stress_period_data.to_array(kper=1)
"""
i0 = 3
unstructured = False
if "inode" in self.dtype.names:
raise NotImplementedError()
if "node" in self.dtype.names:
if "i" not in self.dtype.names and "j" not in self.dtype.names:
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if not self.dtype.fields[name][0] == object:
if unstructured:
arr = np.zeros((self._model.nlay * self._model.ncpl,))
else:
arr = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol)
)
arrays[name] = arr.copy()
# if this kper is not found
if kper not in self.data.keys():
kpers = list(self.data.keys())
kpers.sort()
# if this kper is before the first entry,
# (maybe) mask and return
if kper < kpers[0]:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
# find the last kper
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
# if there are no entries for this kper
if sarr == 0:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
raise Exception("MfList: something bad happened")
for name, arr in arrays.items():
if unstructured:
cnt = np.zeros(
(self._model.nlay * self._model.ncpl,), dtype=float
)
else:
cnt = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol),
dtype=float,
)
# print(name,kper)
for rec in sarr:
if unstructured:
arr[rec["node"]] += rec[name]
cnt[rec["node"]] += 1.0
else:
arr[rec["k"], rec["i"], rec["j"]] += rec[name]
cnt[rec["k"], rec["i"], rec["j"]] += 1.0
# average keys that should not be added
if name not in ("cond", "flux"):
idx = cnt > 0.0
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where(cnt == 0.0, arr)
arr[cnt == 0.0] = np.NaN
arrays[name] = arr.copy()
# elif mask:
# for name, arr in arrays.items():
# arrays[name][:] = np.NaN
return arrays
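# Usage sketch (assumes a loaded model `ml` as in the docstring example):
# arrs = ml.wel.stress_period_data.to_array(kper=1, mask=True)
# arrs["flux"].shape  # (nlay, nrow, ncol); NaN where no boundary is active
# Note the design choice above: 'cond' and 'flux' are summed over duplicate
# cells, every other field is averaged.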
@property
def masked_4D_arrays(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
m4ds = {}
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
m4ds[name] = m4d
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for name, array in arrays.items():
m4ds[name][kper, :, :, :] = array
return m4ds
def masked_4D_arrays_itr(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for tname, array in arrays.items():
if tname == name:
m4d[kper, :, :, :] = array
yield name, m4d
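# Usage sketch for the generator above: unlike the masked_4D_arrays property
# it materializes one (nper, nlay, nrow, ncol) array at a time, keeping
# memory bounded when a package has many fields.
# for name, m4d in ml.wel.stress_period_data.masked_4D_arrays_itr():
#     print(name, m4d.shape)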
@property
def array(self):
return self.masked_4D_arrays
@classmethod
def from_4d(cls, model, pak_name, m4ds):
"""construct an MfList instance from a dict of
(attribute_name,masked 4D ndarray
Parameters
----------
model : mbase derived type
pak_name : str package name (e.g GHB)
m4ds : {attribute name:4d masked numpy.ndarray}
Returns
-------
MfList instance
"""
sp_data = MfList.masked4D_arrays_to_stress_period_data(
model.get_package(pak_name).get_default_dtype(), m4ds
)
return cls(model.get_package(pak_name), data=sp_data)
@staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
""" convert a dictionary of 4-dim masked arrays to
a stress_period_data style dict of recarray
Parameters
----------
dtype : numpy dtype
m4ds : dict {name:masked numpy 4-dim ndarray}
Returns
-------
dict {kper:recarray}
"""
assert isinstance(m4ds, dict)
for name, m4d in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert name in dtype.names
assert m4d.ndim == 4
keys = list(m4ds.keys())
for i1, key1 in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for i2, key2 in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if not np.array_equal(a1, a2):
raise Exception(
"Transient2d error: masking not equal"
+ " for {0} and {1}".format(key1, key2)
)
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for name, m4d in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere(~np.isnan(arr))
v = []
for k, i, j in isnan:
v.append(arr[k, i, j])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd["i"] = ii
spd["k"] = kk
spd["j"] = jj
for n, v in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data | def to_shapefile(self, filename, kper=None):
"""
Export stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
filename : str
Shapefile name to write
kper : int
MODFLOW zero-based stress period number to return. (default is None)
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.to_shapefile('test_hk.shp', kper=1)
"""
import warnings
warnings.warn(
"Deprecation warning: to_shapefile() is deprecated. use .export()"
)
# if self.sr is None:
# raise Exception("MfList.to_shapefile: SpatialReference not set")
# import flopy.utils.flopy_io as fio
# if kper is None:
# keys = self.data.keys()
# keys.sort()
# else:
# keys = [kper]
# array_dict = {}
# for kk in keys:
# arrays = self.to_array(kk)
# for name, array in arrays.items():
# for k in range(array.shape[0]):
# #aname = name+"{0:03d}_{1:02d}".format(kk, k)
# n = fio.shape_attr_name(name, length=4)
# aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1)
# array_dict[aname] = array[k]
# fio.write_grid_shapefile(filename, self.sr, array_dict)
self.export(filename, kper=kper) | 1,010 | 1,062 | """
util_list module. Contains the mflist class.
This classes encapsulates modflow-style list inputs away
from the individual packages. The end-user should not need to
instantiate this class directly.
some more info
"""
from __future__ import division, print_function
import os
import warnings
import numpy as np
from ..datbase import DataInterface, DataListInterface, DataType
from ..utils.recarray_utils import create_empty_recarray
try:
from numpy.lib import NumpyVersion
numpy114 = NumpyVersion(np.__version__) >= "1.14.0"
except ImportError:
numpy114 = False
class MfList(DataInterface, DataListInterface):
"""
a generic object for handling transient boundary condition lists
Parameters
----------
package : package object
The package object (of type :class:`flopy.pakbase.Package`) to which
this MfList will be added.
data : varies
the data of the transient list (optional). (the default is None)
Attributes
----------
mxact : int
the max number of active bc for any stress period
Methods
-------
add_record(kper,index,value) : None
add a record to stress period kper at index location
write_transient(f) : None
write the transient sequence to the model input file f
check_kij() : None
checks for boundaries outside of model domain - issues warnings only
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(
self,
package,
data=None,
dtype=None,
model=None,
list_free_format=None,
binary=False,
):
if isinstance(data, MfList):
for attr in data.__dict__.items():
setattr(self, attr[0], attr[1])
if model is None:
self._model = package.parent
else:
self._model = model
self._package = package
return
self._package = package
if model is None:
self._model = package.parent
else:
self._model = model
if dtype is None:
assert isinstance(self.package.dtype, np.dtype)
self.__dtype = self.package.dtype
else:
self.__dtype = dtype
self.__binary = binary
self.__vtype = {}
self.__data = {}
if data is not None:
self.__cast_data(data)
self.__df = None
if list_free_format is None:
if package.parent.version == "mf2k":
list_free_format = False
self.list_free_format = list_free_format
return
@property
def name(self):
return self.package.name
@property
def mg(self):
return self._model.modelgrid
@property
def sr(self):
return self.mg.sr
@property
def model(self):
return self._model
@property
def package(self):
return self._package
@property
def data_type(self):
return DataType.transientlist
@property
def plotable(self):
return True
def get_empty(self, ncell=0):
d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)
return d
def export(self, f, **kwargs):
from flopy import export
return export.utils.mflist_export(f, self, **kwargs)
def append(self, other):
""" append the recarrays from one MfList to another
Parameters
----------
other: variable: an item that can be cast into an MfList
that corresponds with self
Returns
-------
dict of {kper:recarray}
"""
if not isinstance(other, MfList):
other = MfList(
self.package,
data=other,
dtype=self.dtype,
model=self._model,
list_free_format=self.list_free_format,
)
msg = (
"MfList.append(): other arg must be "
+ "MfList or dict, not {0}".format(type(other))
)
assert isinstance(other, MfList), msg
other_kpers = list(other.data.keys())
other_kpers.sort()
self_kpers = list(self.data.keys())
self_kpers.sort()
new_dict = {}
for kper in range(self._model.nper):
other_data = other[kper].copy()
self_data = self[kper].copy()
other_len = other_data.shape[0]
self_len = self_data.shape[0]
if (other_len == 0 and self_len == 0) or (
kper not in self_kpers and kper not in other_kpers
):
continue
elif self_len == 0:
new_dict[kper] = other_data
elif other_len == 0:
new_dict[kper] = self_data
else:
new_len = other_data.shape[0] + self_data.shape[0]
new_data = np.recarray(new_len, dtype=self.dtype)
new_data[:self_len] = self_data
new_data[self_len : self_len + other_len] = other_data
new_dict[kper] = new_data
return new_dict
def drop(self, fields):
"""drop fields from an MfList
Parameters
----------
fields : list or set of field names to drop
Returns
-------
dropped : MfList without the dropped fields
"""
if not isinstance(fields, list):
fields = [fields]
names = [n for n in self.dtype.names if n not in fields]
dtype = np.dtype(
[(k, d) for k, d in self.dtype.descr if k not in fields]
)
spd = {}
for k, v in self.data.items():
# because np 1.9 doesn't support indexing by list of columns
newarr = np.array([self.data[k][n] for n in names]).transpose()
newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
np.recarray
)
for n in dtype.names:
newarr[n] = self.data[k][n]
spd[k] = newarr
return MfList(self.package, spd, dtype=dtype)
@property
def data(self):
return self.__data
@property
def df(self):
if self.__df is None:
self.__df = self.get_dataframe()
return self.__df
@property
def vtype(self):
return self.__vtype
@property
def dtype(self):
return self.__dtype
# Get the itmp for a given kper
def get_itmp(self, kper):
if kper not in list(self.__data.keys()):
return None
if self.__vtype[kper] is None:
return -1
# If an external file, have to load it
if self.__vtype[kper] == str:
return self.__fromfile(self.__data[kper]).shape[0]
if self.__vtype[kper] == np.recarray:
return self.__data[kper].shape[0]
# If not any of the above, it must be an int
return self.__data[kper]
@property
def mxact(self):
mxact = 0
for kper in list(self.__data.keys()):
mxact = max(mxact, self.get_itmp(kper))
return mxact
@property
def fmt_string(self):
"""Returns a C-style fmt string for numpy savetxt that corresponds to
the dtype"""
if self.list_free_format is not None:
use_free = self.list_free_format
else:
use_free = True
if self.package.parent.has_package("bas6"):
use_free = self.package.parent.bas6.ifrefm
# mt3d list data is fixed format
if "mt3d" in self.package.parent.version.lower():
use_free = False
fmts = []
for field in self.dtype.descr:
vtype = field[1][1].lower()
if vtype in ("i", "b"):
if use_free:
fmts.append("%9d")
else:
fmts.append("%10d")
elif vtype == "f":
if use_free:
if numpy114:
# Use numpy's floating-point formatter (Dragon4)
fmts.append("%15s")
else:
fmts.append("%15.7E")
else:
fmts.append("%10G")
elif vtype == "o":
if use_free:
fmts.append("%9s")
else:
fmts.append("%10s")
elif vtype == "s":
msg = (
"MfList.fmt_string error: 'str' type found in dtype. "
"This gives unpredictable results when "
"recarray to file - change to 'object' type"
)
raise TypeError(msg)
else:
raise TypeError(
"MfList.fmt_string error: unknown vtype in "
"field: {}".format(field)
)
if use_free:
fmt_string = " " + " ".join(fmts)
else:
fmt_string = "".join(fmts)
return fmt_string
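# Example of what the property above produces: for a dtype of three ints
# (k, i, j) plus one float field,
#   free format  -> " %9d %9d %9d %15s"  (numpy >= 1.14; "%15.7E" otherwise)
#   fixed format -> "%10d%10d%10d%10G"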
# Private method to cast the data argument
# Should only be called by the constructor
def __cast_data(self, data):
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# If data is a dict, the we have to assume it is keyed on kper
if isinstance(data, dict):
if not list(data.keys()):
raise Exception("MfList error: data dict is empty")
for kper, d in data.items():
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: data dict key "
+ "{0:s} not integer: ".format(kper)
+ str(type(kper))
+ "\n"
+ str(e)
)
# Same as before, just try...
if isinstance(d, list):
# warnings.warn("MfList: casting list to array at " +\
# "kper {0:d}".format(kper))
try:
d = np.array(d)
except Exception as e:
raise Exception(
"MfList error: casting list "
+ "to ndarray: "
+ str(e)
)
# super hack - sick of recarrays already
# if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1):
# d = d.view(np.recarray)
if isinstance(d, np.recarray):
self.__cast_recarray(kper, d)
elif isinstance(d, np.ndarray):
self.__cast_ndarray(kper, d)
elif isinstance(d, int):
self.__cast_int(kper, d)
elif isinstance(d, str):
self.__cast_str(kper, d)
elif d is None:
self.__data[kper] = -1
self.__vtype[kper] = None
else:
raise Exception(
"MfList error: unsupported data type: "
+ str(type(d))
+ " at kper "
+ "{0:d}".format(kper)
)
# A single recarray - same MfList for all stress periods
elif isinstance(data, np.recarray):
self.__cast_recarray(0, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(0, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(0, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
def __cast_str(self, kper, d):
# If d is a string, assume it is a filename and check that it exists
assert os.path.exists(d), (
"MfList error: dict filename (string) '"
+ d
+ "' value for "
+ "kper {0:d} not found".format(kper)
)
self.__data[kper] = d
self.__vtype[kper] = str
def __cast_int(self, kper, d):
# If d is an integer, then it must be 0 or -1
if d > 0:
raise Exception(
"MfList error: dict integer value for "
"kper {0:10d} must be 0 or -1, "
"not {1:10d}".format(kper, d)
)
if d == 0:
self.__data[kper] = 0
self.__vtype[kper] = None
else:
self.__data[kper] = -1
self.__vtype[kper] = None
def __cast_recarray(self, kper, d):
assert d.dtype == self.__dtype, (
"MfList error: recarray dtype: "
+ str(d.dtype)
+ " doesn't match "
+ "self dtype: "
+ str(self.dtype)
)
self.__data[kper] = d
self.__vtype[kper] = np.recarray
def __cast_ndarray(self, kper, d):
d = np.atleast_2d(d)
if d.dtype != self.__dtype:
assert d.shape[1] == len(self.dtype), (
"MfList error: ndarray "
+ "shape "
+ str(d.shape)
+ " doesn't match dtype "
+ "len: "
+ str(len(self.dtype))
)
# warnings.warn("MfList: ndarray dtype does not match self " +\
# "dtype, trying to cast")
try:
self.__data[kper] = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype
)
except Exception as e:
raise Exception(
"MfList error: casting ndarray to recarray: " + str(e)
)
self.__vtype[kper] = np.recarray
def get_dataframe(self, squeeze=True):
"""
Cast recarrays for stress periods into single
dataframe containing all stress periods.
Parameters
----------
squeeze : bool
Reduce number of columns in dataframe to only include
stress periods where a variable changes.
Returns
-------
df : dataframe
Dataframe of shape nrow = ncells, ncol = nvar x nper. If
the squeeze option is chosen, nper is the number of
stress periods where at least one cell is different,
otherwise it is equal to the number of keys in MfList.data.
Notes
-----
Requires pandas.
"""
try:
import pandas as pd
except Exception as e:
msg = "MfList.get_dataframe() requires pandas"
raise ImportError(msg)
# make a dataframe of all data for all stress periods
names = ["k", "i", "j"]
if "MNW2" in self.package.name:
names += ["wellid"]
# find relevant variable names
# may have to iterate over the first stress period
for per in range(self._model.nper):
if hasattr(self.data[per], "dtype"):
varnames = list(
[n for n in self.data[per].dtype.names if n not in names]
)
break
# create list of dataframes for each stress period
# each with index of k, i, j
dfs = []
for per in self.data.keys():
recs = self.data[per]
if recs is None or len(recs) == 0:
# add an empty dataframe if a stress period is
# empty (e.g. no pumping during a predevelopment
# period)
columns = names + list(
["{}{}".format(c, per) for c in varnames]
)
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename("n")
if (count > 1).values.any():
print(
"Duplicated list entry locations aggregated "
"for kper {}".format(per)
)
for kij in count[count > 1].index.values:
print(" (k,i,j) {}".format(kij))
dfi = dfg.sum() # aggregate
dfi.columns = list(["{}{}".format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if var in n])
diff = df[diffcols].fillna(0).diff(axis=1)
diff[
"{}0".format(var)
] = 1 # always return the first stress period
changed = diff.sum(axis=0) != 0
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), "node", df.i * self._model.ncol + df.j)
return df
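# Usage sketch (pandas required): one row per boundary cell, one column per
# variable/stress-period pair; with squeeze=True only stress periods where a
# value actually changes keep a column (e.g. flux0, flux3, ...).
# df = ml.wel.stress_period_data.get_dataframe(squeeze=True)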
def add_record(self, kper, index, values):
# Add a record to possible already set list for a given kper
# index is a list of k,i,j or nodes.
# values is a list of floats.
# The length of index + values must be equal to the number of names
# in dtype
assert len(index) + len(values) == len(self.dtype), (
"MfList.add_record() error: length of index arg +"
+ "length of value arg != length of self dtype"
)
# If we already have something for this kper, then add to it
if kper in list(self.__data.keys()):
if self.vtype[kper] == int:
# If a 0 or -1, reset
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == str:
# If filename, load into recarray
d = self.__fromfile(self.data[kper])
d.resize(d.shape[0], d.shape[1])
self.__data[kper] = d
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == np.recarray:
# Extend the recarray
self.__data[kper] = np.append(
self.__data[kper], self.get_empty(1)
)
else:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
rec = list(index)
rec.extend(list(values))
try:
self.__data[kper][-1] = tuple(rec)
except Exception as e:
raise Exception(
"MfList.add_record() error: adding record to "
+ "recarray: "
+ str(e)
)
def __getitem__(self, kper):
# Get the recarray for a given kper
# If the data entry for kper is a string,
# return the corresponding recarray,
# but don't reset the value in the data dict
# assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \
# str(kper) + " not in data.keys()"
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: _getitem__() passed invalid kper index:"
+ str(kper)
)
if kper not in list(self.data.keys()):
if kper == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == int:
if self.data[kper] == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == str:
return self.__fromfile(self.data[kper])
if self.vtype[kper] == np.recarray:
return self.data[kper]
def __setitem__(self, kper, data):
if kper in list(self.__data.keys()):
if self._model.verbose:
print("removing existing data for kper={}".format(kper))
self.data.pop(kper)
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# cast data
if isinstance(data, int):
self.__cast_int(kper, data)
elif isinstance(data, np.recarray):
self.__cast_recarray(kper, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(kper, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(kper, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
# raise NotImplementedError("MfList.__setitem__() not implemented")
def __fromfile(self, f):
# d = np.fromfile(f,dtype=self.dtype,count=count)
try:
d = np.genfromtxt(f, dtype=self.dtype)
except Exception as e:
raise Exception(
"MfList.__fromfile() error reading recarray "
+ "from file "
+ str(e)
)
return d
def get_filenames(self):
kpers = list(self.data.keys())
kpers.sort()
filenames = []
first = kpers[0]
for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_vtype = self.__vtype[kper]
if (
self._model.array_free_format
and self._model.external_path is not None
):
# py_filepath = ''
# py_filepath = os.path.join(py_filepath,
# self._model.external_path)
filename = self.package.name[0] + "_{0:04d}.dat".format(kper)
filenames.append(filename)
return filenames
def get_filename(self, kper):
ext = "dat"
if self.binary:
ext = "bin"
return self.package.name[0] + "_{0:04d}.{1}".format(kper, ext)
@property
def binary(self):
return bool(self.__binary)
def write_transient(self, f, single_per=None, forceInternal=False):
# forceInternal overrides isExternal (set below) for cases where
# external arrays are not supported (oh hello MNW1!)
# write the transient sequence described by the data dict
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
assert hasattr(f, "read"), (
"MfList.write() error: " + "f argument must be a file handle"
)
kpers = list(self.data.keys())
kpers.sort()
first = kpers[0]
if single_per is None:
loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))
else:
if not isinstance(single_per, list):
single_per = [single_per]
loop_over_kpers = single_per
for kper in loop_over_kpers:
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_data = self.__data[kper]
kper_vtype = self.__vtype[kper]
if kper_vtype == str:
if not self._model.array_free_format:
kper_data = self.__fromfile(kper_data)
kper_vtype = np.recarray
itmp = self.get_itmp(kper)
if kper_vtype == np.recarray:
itmp = kper_data.shape[0]
elif (kper_vtype == int) or (kper_vtype is None):
itmp = kper_data
# Fill late missing kpers with -1
else:
itmp = -1
kper_vtype = int
f.write(
" {0:9d} {1:9d} # stress period {2:d}\n".format(
itmp, 0, kper + 1
)
)
isExternal = False
if (
self._model.array_free_format
and self._model.external_path is not None
and forceInternal is False
):
isExternal = True
if self.__binary:
isExternal = True
if isExternal:
if kper_vtype == np.recarray:
py_filepath = ""
if self._model.model_ws is not None:
py_filepath = self._model.model_ws
if self._model.external_path is not None:
py_filepath = os.path.join(
py_filepath, self._model.external_path
)
filename = self.get_filename(kper)
py_filepath = os.path.join(py_filepath, filename)
model_filepath = filename
if self._model.external_path is not None:
model_filepath = os.path.join(
self._model.external_path, filename
)
self.__tofile(py_filepath, kper_data)
kper_vtype = str
kper_data = model_filepath
if kper_vtype == np.recarray:
name = f.name
if self.__binary or not numpy114:
f.close()
# switch file append mode to binary
with open(name, "ab+") as f:
self.__tofile(f, kper_data)
# continue back to non-binary
f = open(name, "a")
else:
self.__tofile(f, kper_data)
elif kper_vtype == str:
f.write(" open/close " + kper_data)
if self.__binary:
f.write(" (BINARY)")
f.write("\n")
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
assert isinstance(data, np.recarray), (
"MfList.__tofile() data arg " + "not a recarray"
)
# Add one to the kij indices
lnames = [name.lower() for name in self.dtype.names]
# --make copy of data for multiple calls
d = data.copy()
for idx in ["k", "i", "j", "node"]:
if idx in lnames:
d[idx] += 1
if self.__binary:
dtype2 = []
for name in self.dtype.names:
dtype2.append((name, np.float32))
dtype2 = np.dtype(dtype2)
d = np.array(d, dtype=dtype2)
d.tofile(f)
else:
np.savetxt(f, d, fmt=self.fmt_string, delimiter="")
def check_kij(self):
names = self.dtype.names
if ("k" not in names) or ("i" not in names) or ("j" not in names):
warnings.warn(
"MfList.check_kij(): index fieldnames 'k,i,j' "
+ "not found in self.dtype names: "
+ str(names)
)
return
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
if nl == 0:
warnings.warn(
"MfList.check_kij(): unable to get dis info from " + "model"
)
return
for kper in list(self.data.keys()):
out_idx = []
data = self[kper]
if data is not None:
k = data["k"]
k_idx = np.where(np.logical_or(k < 0, k >= nl))
if k_idx[0].shape[0] > 0:
out_idx.extend(list(k_idx[0]))
i = data["i"]
i_idx = np.where(np.logical_or(i < 0, i >= nr))
if i_idx[0].shape[0] > 0:
out_idx.extend(list(i_idx[0]))
j = data["j"]
j_idx = np.where(np.logical_or(j < 0, j >= nc))
if j_idx[0].shape[0]:
out_idx.extend(list(j_idx[0]))
if len(out_idx) > 0:
warn_str = (
"MfList.check_kij(): warning the following "
+ "indices are out of bounds in kper "
+ str(kper)
+ ":\n"
)
for idx in out_idx:
d = data[idx]
warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
d["k"] + 1, d["i"] + 1, d["j"] + 1
)
warnings.warn(warn_str)
def __find_last_kper(self, kper):
kpers = list(self.data.keys())
kpers.sort()
last = 0
for kkper in kpers[::-1]:
# if this entry is valid
if self.vtype[kkper] != int or self.data[kkper] != -1:
last = kkper
if kkper <= kper:
break
return kkper
def get_indices(self):
"""
a helper function for plotting - get all unique indices
"""
names = self.dtype.names
lnames = [name.lower() for name in names]
if "k" not in lnames or "j" not in lnames:
raise NotImplementedError("MfList.get_indices requires kij")
kpers = list(self.data.keys())
kpers.sort()
indices = []
for i, kper in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if (kper_vtype != int) and (kper_vtype is not None):
d = self.data[kper]
if not indices:
indices = list(zip(d["k"], d["i"], d["j"]))
else:
new_indices = list(zip(d["k"], d["i"], d["j"]))
for ni in new_indices:
if ni not in indices:
indices.append(ni)
return indices
def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
assert attr in self.dtype.names
if idx_val is not None:
assert idx_val[0] in self.dtype.names
kpers = list(self.data.keys())
kpers.sort()
values = []
for kper in range(0, max(self._model.nper, max(kpers))):
if kper < min(kpers):
values.append(0)
elif kper > max(kpers) or kper not in kpers:
values.append(values[-1])
else:
kper_data = self.__data[kper]
if idx_val is not None:
kper_data = kper_data[
np.where(kper_data[idx_val[0]] == idx_val[1])
]
# kper_vtype = self.__vtype[kper]
v = function(kper_data[attr])
values.append(v)
return values
def plot(
self,
key=None,
names=None,
kper=0,
filename_base=None,
file_extension=None,
mflay=None,
**kwargs
):
"""
Plot stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
key : str
MfList dictionary key. (default is None)
names : list
List of names for figure titles. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
**kwargs : dict
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
inactive : bool
Boolean used to determine if a black overlay in inactive
cells in a layer will be displayed. (default is True)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
Returns
----------
out : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.stress_period_data.plot(ml.wel, kper=1)
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(
self,
key=key,
names=names,
kper=kper,
filename_base=filename_base,
file_extension=file_extension,
mflay=mflay,
**kwargs
)
return axes
def to_shapefile(self, filename, kper=None):
"""
Export stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
filename : str
Shapefile name to write
kper : int
MODFLOW zero-based stress period number to return. (default is None)
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.to_shapefile('test_hk.shp', kper=1)
"""
import warnings
warnings.warn(
"Deprecation warning: to_shapefile() is deprecated. use .export()"
)
# if self.sr is None:
# raise Exception("MfList.to_shapefile: SpatialReference not set")
# import flopy.utils.flopy_io as fio
# if kper is None:
# keys = self.data.keys()
# keys.sort()
# else:
# keys = [kper]
# array_dict = {}
# for kk in keys:
# arrays = self.to_array(kk)
# for name, array in arrays.items():
# for k in range(array.shape[0]):
# #aname = name+"{0:03d}_{1:02d}".format(kk, k)
# n = fio.shape_attr_name(name, length=4)
# aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1)
# array_dict[aname] = array[k]
# fio.write_grid_shapefile(filename, self.sr, array_dict)
self.export(filename, kper=kper)
def to_array(self, kper=0, mask=False):
"""
Convert stress period boundary condition (MfList) data for a
specified stress period to a 3-D numpy array
Parameters
----------
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
mask : boolean
return array with np.NaN instead of zero
Returns
----------
out : dict of numpy.ndarrays
Dictionary of 3-D numpy arrays containing the stress period data for
a selected stress period. The dictionary keys are the MfList dtype
names for the stress period data ('cond', 'flux', 'bhead', etc.).
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> v = ml.wel.stress_period_data.to_array(kper=1)
"""
i0 = 3
unstructured = False
if "inode" in self.dtype.names:
raise NotImplementedError()
if "node" in self.dtype.names:
if "i" not in self.dtype.names and "j" not in self.dtype.names:
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if not self.dtype.fields[name][0] == object:
if unstructured:
arr = np.zeros((self._model.nlay * self._model.ncpl,))
else:
arr = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol)
)
arrays[name] = arr.copy()
# if this kper is not found
if kper not in self.data.keys():
kpers = list(self.data.keys())
kpers.sort()
# if this kper is before the first entry,
# (maybe) mask and return
if kper < kpers[0]:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
# find the last kper
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
# if there are no entries for this kper
if sarr == 0:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
raise Exception("MfList: something bad happened")
for name, arr in arrays.items():
if unstructured:
cnt = np.zeros(
(self._model.nlay * self._model.ncpl,), dtype=float
)
else:
cnt = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol),
dtype=float,
)
# print(name,kper)
for rec in sarr:
if unstructured:
arr[rec["node"]] += rec[name]
cnt[rec["node"]] += 1.0
else:
arr[rec["k"], rec["i"], rec["j"]] += rec[name]
cnt[rec["k"], rec["i"], rec["j"]] += 1.0
# average keys that should not be added
if name not in ("cond", "flux"):
idx = cnt > 0.0
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where(cnt == 0.0, arr)
arr[cnt == 0.0] = np.NaN
arrays[name] = arr.copy()
# elif mask:
# for name, arr in arrays.items():
# arrays[name][:] = np.NaN
return arrays
@property
def masked_4D_arrays(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
m4ds = {}
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
m4ds[name] = m4d
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for name, array in arrays.items():
m4ds[name][kper, :, :, :] = array
return m4ds
def masked_4D_arrays_itr(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for tname, array in arrays.items():
if tname == name:
m4d[kper, :, :, :] = array
yield name, m4d
@property
def array(self):
return self.masked_4D_arrays
@classmethod
def from_4d(cls, model, pak_name, m4ds):
"""construct an MfList instance from a dict of
(attribute_name,masked 4D ndarray
Parameters
----------
model : mbase derived type
pak_name : str package name (e.g GHB)
m4ds : {attribute name:4d masked numpy.ndarray}
Returns
-------
MfList instance
"""
sp_data = MfList.masked4D_arrays_to_stress_period_data(
model.get_package(pak_name).get_default_dtype(), m4ds
)
return cls(model.get_package(pak_name), data=sp_data)
@staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
""" convert a dictionary of 4-dim masked arrays to
a stress_period_data style dict of recarray
Parameters
----------
dtype : numpy dtype
m4ds : dict {name:masked numpy 4-dim ndarray}
Returns
-------
dict {kper:recarray}
"""
assert isinstance(m4ds, dict)
for name, m4d in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert name in dtype.names
assert m4d.ndim == 4
keys = list(m4ds.keys())
for i1, key1 in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for i2, key2 in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if not np.array_equal(a1, a2):
raise Exception(
"Transient2d error: masking not equal"
+ " for {0} and {1}".format(key1, key2)
)
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for name, m4d in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere(~np.isnan(arr))
v = []
for k, i, j in isnan:
v.append(arr[k, i, j])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd["i"] = ii
spd["k"] = kk
spd["j"] = jj
for n, v in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data
|
get_precision | Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data. | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
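# A quick numeric check of the formula above (an illustrative sketch, not
# scikit-learn code): with orthonormal components C (k x n_features),
# explained variances exp_var and noise variance s2,
#   cov == C.T @ diag(exp_var - s2) @ C + s2 * I
# rng = np.random.default_rng(0)
# C = np.linalg.qr(rng.normal(size=(5, 2)))[0].T  # two orthonormal rows
# exp_var, s2 = np.array([3.0, 2.0]), 0.5
# cov = (C.T * (exp_var - s2)) @ C + s2 * np.eye(5)  # symmetric PSD, 5 x 5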
# MASKED: get_precision function (lines 49-79)
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of components.
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return (
np.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return np.dot(X, self.components_) + self.mean_
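# Usage sketch with the concrete PCA subclass: the round trip reverses
# whitening exactly, but reconstruction is lossy whenever
# n_components < n_features (a rank-n_components approximation of X).
# from sklearn.decomposition import PCA
# X = np.random.RandomState(0).normal(size=(100, 5))
# pca = PCA(n_components=3, whiten=True).fit(X)
# X_back = pca.inverse_transform(pca.transform(X))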
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0] | def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
return precision | 49 | 79 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
return precision
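# The identity exploited above (matrix inversion lemma / Woodbury), with
# C = components_ (k x n), D = diag(exp_var_diff) and s2 = noise_variance_:
#   (s2*I + C.T @ D @ C)^-1 = I/s2 - C.T @ (D^-1 + C @ C.T/s2)^-1 @ C / s2**2
# so only a k x k matrix is inverted instead of an n x n one. Sanity check
# (sketch): after fitting, precision times covariance should be ~identity:
# np.allclose(pca.get_precision() @ pca.get_covariance(), np.eye(n_features))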
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of components.
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return (
np.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return np.dot(X, self.components_) + self.mean_
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
|
_reverse_seq | Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all(x == inferred_dtypes[0] for x in inferred_dtypes)
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
"""Build the subgraph defined by lambda `fn` on `device` if it's not None."""
if device:
with ops.device(device):
return fn()
else:
return fn()
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device(
lambda: array_ops.where(copy_cond, output, new_output),
device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
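# Illustrative sketch (hypothetical helper): `_rnn_step` with a trivial
# "cell" that increments the state. Minibatch rows whose sequence has
# already ended keep their old state and emit `zero_output`; the remaining
# rows take the freshly computed output and state. `time` may be a Python
# int or a scalar int32 `Tensor`.
def _example_rnn_step(state, sequence_length, time):
  zero_output = array_ops.zeros_like(state)
  call_cell = lambda: (state + 1, state + 1)  # (new_output, new_state)
  return _rnn_step(
      time=time,
      sequence_length=sequence_length,
      min_sequence_length=math_ops.reduce_min(sequence_length),
      max_sequence_length=math_ops.reduce_max(sequence_length),
      zero_output=zero_output,
      state=state,
      call_cell=call_cell,
      state_size=1)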
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.
  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
      or nested tuples of tensors.
    lengths: A `Tensor` of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses
      the list.
  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))
  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)
    # Join into (time, batch_size, depth)
    s_joined = array_ops.stack(sequence)
    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    if lengths is not None:
      lengths = math_ops.to_int64(lengths)
    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unstack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)
  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results
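# Illustrative sketch (hypothetical helper): reverse a 3-step sequence for a
# batch of two, where the second batch entry is only two steps long. Steps
# beyond an entry's length are left in place by `reverse_sequence`.
def _example_reverse_seq():
  steps = [array_ops.fill([2, 4], float(t)) for t in range(3)]
  lengths = constant_op.constant([3, 2])
  return _reverse_seq(steps, lengths)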
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs. The input_size of forward and
backward cell must match. The initial state for both directions is zero by
default (but can be set optionally) and no intermediate states are ever
returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
sequence_length: An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
      It returns a tuple instead of a single concatenated `Tensor`, unlike
      `bidirectional_rnn`. If a single concatenated tensor is preferred, the
      forward and backward outputs can be joined with
      `tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
# pylint: disable=protected-access
if not isinstance(cell_fw, rnn_cell_impl._RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell_impl._RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
# pylint: enable=protected-access
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = array_ops.reverse_sequence(
input=inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(
input=tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
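# Usage sketch (illustrative only, not part of the public API): assumes a
# TF 1.x environment with `tf.placeholder` and the `tf.contrib.rnn.LSTMCell`
# class referenced in the `raw_rnn` docstring below.
def _example_bidirectional_dynamic_rnn(batch_size, max_time, depth, num_units):
  import tensorflow as tf
  inputs = tf.placeholder(tf.float32, [batch_size, max_time, depth])
  lengths = tf.placeholder(tf.int32, [batch_size])
  cell_fw = tf.contrib.rnn.LSTMCell(num_units)
  cell_bw = tf.contrib.rnn.LSTMCell(num_units)
  outputs, output_states = bidirectional_dynamic_rnn(
      cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=tf.float32)
  # Fuse the forward and backward outputs along the feature axis.
  return tf.concat(outputs, 2), output_states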
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
  The parameter `sequence_length` is optional and is used to copy through
  state and zero out outputs once past a batch element's sequence length;
  it is provided for correctness rather than performance, unlike in `rnn()`.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
batch_size = input_shape[0][1]
for input_ in input_shape:
if input_[1].get_shape() != batch_size.get_shape():
raise ValueError("All inputs should have the same batch size")
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2])
for output in flat_output]
outputs = nest.pack_sequence_as(
structure=outputs, flat_sequence=flat_output)
return (outputs, final_state)
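# Usage sketch for batch-major inputs (same assumptions as the bidirectional
# example above; illustrative only).
def _example_dynamic_rnn(batch_size, max_time, depth, num_units):
  import tensorflow as tf
  inputs = tf.placeholder(tf.float32, [batch_size, max_time, depth])
  lengths = tf.placeholder(tf.int32, [batch_size])
  cell = tf.contrib.rnn.LSTMCell(num_units)
  # `dtype` is mandatory here because no `initial_state` is supplied.
  outputs, state = dynamic_rnn(
      cell, inputs, sequence_length=lengths, dtype=tf.float32)
  return outputs, state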
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
  time = constant_op.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
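# Illustrative sketch (hypothetical helper) of the TensorArray pattern used
# above: unstack a [time, batch, depth] tensor into per-step reads, then
# stack per-step writes back into a [time, batch, depth] result.
def _example_tensor_array_roundtrip(x):
  time_steps = array_ops.shape(x)[0]
  ta = tensor_array_ops.TensorArray(dtype=x.dtype, size=time_steps)
  ta = ta.unstack(x)
  out = tensor_array_ops.TensorArray(dtype=x.dtype, size=time_steps)
  def body(t, out_ta):
    return t + 1, out_ta.write(t, ta.read(t))
  _, out = control_flow_ops.while_loop(
      lambda t, _: t < time_steps, body, (constant_op.constant(0), out))
  return out.stack()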
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(
_state_size_with_prefix(size_i, prefix=[batch_size]),
dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
# pylint: disable=g-long-lambda,cell-var-from-loop
result_flat = [
_on_device(
lambda: array_ops.where(
elements_finished, current_i, candidate_i),
device=candidate_i.op.device)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
# pylint: enable=g-long-lambda,cell-var-from-loop
return nest.pack_sequence_as(
structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [
ta.write(time, emit)
for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(
structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state) | def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
if lengths is not None:
lengths = math_ops.to_int64(lengths)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results | 209 | 252 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
"""Build the subgraph defined by lambda `fn` on `device` if it's not None."""
if device:
with ops.device(device):
return fn()
else:
return fn()
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device(
lambda: array_ops.where(copy_cond, output, new_output),
device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
if lengths is not None:
lengths = math_ops.to_int64(lengths)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs. The input_size of forward and
backward cell must match. The initial state for both directions is zero by
default (but can be set optionally) and no intermediate states are ever
returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
[batch_size, input_size].
sequence_length: An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
# pylint: disable=protected-access
if not isinstance(cell_fw, rnn_cell_impl._RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell_impl._RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
# pylint: enable=protected-access
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = array_ops.reverse_sequence(
input=inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(
input=tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
The parameter `sequence_length` is optional and is used to copy-through state
and zero-out outputs when past a batch element's sequence length. So it's more
for correctness than performance, unlike in rnn().
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
batch_size = input_shape[0][1]
for input_ in input_shape:
if input_[1].get_shape() != batch_size.get_shape():
raise ValueError("All inputs should have the same batch size")
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2])
for output in flat_output]
outputs = nest.pack_sequence_as(
structure=outputs, flat_sequence=flat_output)
return (outputs, final_state)
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
    if const_time_steps != got_time_steps:
      raise ValueError(
          "The number of time steps is not the same for all elements of the "
          "input batch.")
    if const_batch_size != got_batch_size:
      raise ValueError(
          "The batch size is not the same for all elements of the input "
          "batch.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
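# --- Illustrative sketch (not part of this module) ---------------------------
# The accumulation pattern _dynamic_rnn_loop relies on, reduced to a toy
# cumulative sum: unstack inputs into a TensorArray, step through them with a
# while_loop, and write one result per step. Assumes the TF 1.x graph-mode
# API; all names and sizes are illustrative.
def _example_while_loop_accumulation():
  """Sketch only: cumulative sum over [time, 1] inputs via while_loop."""
  import tensorflow as tf
  xs = tf.constant([[1.], [2.], [3.]])                  # [time, 1]
  input_ta = tf.TensorArray(tf.float32, size=3).unstack(xs)
  output_ta = tf.TensorArray(tf.float32, size=3)
  def body(t, out_ta, acc):
    acc = acc + input_ta.read(t)                        # read step t's input
    return t + 1, out_ta.write(t, acc), acc             # write step t's output
  _, out_ta, _ = tf.while_loop(
      cond=lambda t, *_: t < 3,
      body=body,
      loop_vars=(tf.constant(0), output_ta, tf.zeros([1])))
  return out_ta.stack()                                 # [[1.], [3.], [6.]]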
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
    # Mark any newly finished minibatch entries as finished.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
      and can be run in parallel will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(
_state_size_with_prefix(size_i, prefix=[batch_size]),
dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
# pylint: disable=g-long-lambda,cell-var-from-loop
result_flat = [
_on_device(
lambda: array_ops.where(
elements_finished, current_i, candidate_i),
device=candidate_i.op.device)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
# pylint: enable=g-long-lambda,cell-var-from-loop
return nest.pack_sequence_as(
structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [
ta.write(time, emit)
for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(
structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
|
_get_ports | Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use. | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
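# Illustrative behaviour sketch (not part of the library): docstring_values
# joins the sorted items with the given indent, so the first bullet carries no
# leading spaces while continuation lines are indented (8 spaces by default).
# Sorting is by the mapped value, so 'off' (0) precedes 'auto' (1):
#
#     >>> print(docstring_values({'off': 0, 'auto': 1}))
#     * ``'off'``
#             * ``'auto'``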
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
# MASKED: _get_ports function (lines 549-573)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
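    # Illustrative behaviour sketch for the method above (hypothetical
    # ``camera`` and ``stream`` objects; not part of the class):
    #
    #     camera._get_image_format('photo.jpg')          # -> 'jpeg'
    #     camera._get_image_format(stream, 'image/png')  # -> 'png'
    #     camera._get_image_format(stream, 'raw')        # -> camera.raw_format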
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
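    # Illustrative usage sketch for the two methods above (not part of the
    # class): show a semi-transparent preview briefly, then restore the
    # display. Assumes a camera and display are attached.
    #
    #     import time
    #     with PiCamera() as camera:
    #         camera.start_preview(alpha=128)   # 0 transparent .. 255 opaque
    #         time.sleep(5)
    #         camera.stop_preview()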
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
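    # Worked example of the padding rule described above (hypothetical
    # ``camera`` object; not part of the class): a 97x57 RGB overlay must be
    # supplied in a buffer padded to the next multiples of 32 (width) and
    # 16 (height):
    #
    #     padded_w = (97 + 31) // 32 * 32       # 128
    #     padded_h = (57 + 15) // 16 * 16       # 64
    #     required = padded_w * padded_h * 3    # 24576 bytes for 'rgb'
    #     overlay = camera.add_overlay(b'\xff' * required, size=(97, 57),
    #                                  format='rgb')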
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
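    # Illustrative usage sketch for the method above (hypothetical filename;
    # not part of the class): record roughly ten seconds of H.264 video,
    # re-raising any encoder error via wait_recording.
    #
    #     with PiCamera(resolution=(1280, 720), framerate=30) as camera:
    #         camera.start_recording('video.h264', quality=23)
    #         camera.wait_recording(10)
    #         camera.stop_recording()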
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
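    # Illustrative sketch of segmented recording with the method above
    # (hypothetical filenames; not part of the class). inline_headers must be
    # left enabled (the default) for split points to exist:
    #
    #     camera.start_recording('segment-001.h264')
    #     for i in range(2, 5):
    #         camera.wait_recording(60)                   # ~60s per segment
    #         camera.split_recording('segment-%03d.h264' % i)
    #     camera.wait_recording(60)
    #     camera.stop_recording()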
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for *timeout* seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
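For example, a sketch that checks for recording errors once per
second (the filename and duration are illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        try:
            for _ in range(10):
                camera.wait_recording(1)
        finally:
            camera.stop_recording()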
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method, it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
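For example, a minimal sketch capturing to a file, then to an
in-memory stream with a resize (names and values are illustrative)::
    import io
    import picamera
    with picamera.PiCamera() as camera:
        camera.capture('image.jpg')
        stream = io.BytesIO()
        camera.capture(stream, format='jpeg', resize=(320, 240))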
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
|                                            | image2013-10-05 12:07:32.498539.jpg, ...   |       |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
|       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,   |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
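As a further sketch (the tag values are illustrative), textual tags
are simply assigned before capturing::
    camera.exif_tags['IFD0.Artist'] = 'Jane Doe'
    camera.exif_tags['EXIF.UserComment'] = 'A test shot'
    camera.capture('tagged.jpg')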
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to a GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
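A one-line sketch (assuming RPi.GPIO is installed and the process
has sufficient privileges)::
    camera.led = False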
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
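For example, a sketch reading the firmware clock (the ``'raw'``
clock mode is assumed here so frame timestamps are comparable)::
    import picamera
    with picamera.PiCamera(clock_mode='raw') as camera:
        print(camera.timestamp)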
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
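For example, a sketch polling the current frame index while
recording (note the ``None`` check, per the note above; the
filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        while True:
            camera.wait_recording(0.2)
            f = camera.frame
            if f is not None and f.index >= 100:
                break
        camera.stop_recording()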
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_mode* and *new_mode* arguments
are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
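                # A (low, high) range was given: zero the scalar framerate
                # as a sentinel meaning "variable"; the real limits travel
                # in fps_range below.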
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
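For example, a minimal sketch selecting mode 4 at construction
(per the note above, setting the mode at construction is the
reliable route)::
    import picamera
    camera = picamera.PiCamera(sensor_mode=4)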
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerates deltas can be fractional with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When ``True``,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
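For example, a short sketch enabling the extra statistics pass
before a still capture (the filename is illustrative)::
    camera.still_stats = True
    camera.capture('image.jpg')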
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
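For example, a sketch fixing the exposure at 1/100s (the values are
illustrative; the framerate must permit the requested speed)::
    import picamera
    with picamera.PiCamera(framerate=30) as camera:
        camera.shutter_speed = 10000   # microseconds
        camera.capture('fixed_exposure.jpg')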
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
        When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
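        For example, to request low sensitivity for a brightly lit scene (an
        illustrative value)::
            camera.iso = 100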
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
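        For example, to brighten the output by a full stop (6 increments of
        1/6th of a stop each)::
            camera.exposure_compensation = 6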
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
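        For example, to let the gains settle on reasonably high values before
        fixing them (the ISO and delay are illustrative choices)::
            import time
            camera.iso = 800
            time.sleep(2)
            camera.exposure_mode = 'off'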
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
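        For example, assuming the flash GPIO pins have been configured as
        described in the note below::
            camera.flash_mode = 'on'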
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
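        For example, to disable automatic white balance and apply typical
        fixed gains (illustrative values)::
            camera.awb_mode = 'off'
            camera.awb_gains = (1.5, 1.2)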
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
        |                    | *x0*, *y0*,    | processed as RGB (0) or YUV(1). Input   |
        |                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *u* and *v* default   |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *g* also defaults     |
| | *r*, *b* | to 1.0. |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
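        For example, to apply the ``'solarize'`` effect with an explicit
        transfer curve (illustrative parameter values)::
            camera.image_effect = 'solarize'
            camera.image_effect_params = (128, 128, 128, 0)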
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
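        For example::
            camera.color_effects = (128, 128)  # black and white output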
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
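        For example::
            camera.rotation = 180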
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The `zoom` is applied to the processed image, after rotation and rescale.
If rotation has been used, zoom is composed of ``(y, x, h, w)`` instead.
        The values `w` and `h` can modify the aspect ratio of the image: use
        equal values for `w` and `h` if you want to preserve the aspect ratio.
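        For example, to view only the central quarter of the frame
        (illustrative values)::
            camera.zoom = (0.25, 0.25, 0.5, 0.5)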
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
        If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
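        For example::
            camera.annotate_text = 'Hello world!'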
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
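        For example, to render the annotation text on a black background (a
        sketch using the :class:`Color` class)::
            camera.annotate_background = picamera.Color('black')
            camera.annotate_text = 'Recording'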
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute are backward compatible although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
| def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port) | 549 | 573 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
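    For example, a Compute Module with two cameras might be opened in
    side-by-side stereoscopic mode as follows (a sketch; requires suitable
    hardware)::
        camera = PiCamera(stereo_mode='side-by-side', stereo_decimate=True)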
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
        # Avoid shadowing the built-in "type"; guess_type returns (None, None)
        # when the type cannot be determined
        mime_type, encoding = mimetypes.guess_type(filename, strict=False)
        if not mime_type:
            raise PiCameraValueError(
                'Unable to determine type from filename %s' % filename)
        return mime_type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
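        For example, a minimal sketch of the pattern described above (the
        filename is illustrative)::
            import picamera
            camera = picamera.PiCamera()
            try:
                camera.capture('image.jpg')
            finally:
                camera.close()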
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
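        For example, a minimal sketch that shows the preview for a couple of
        seconds before restoring the display (the delay is illustrative)::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                time.sleep(2)
                camera.stop_preview()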
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
        ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
        specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
        a multiple of 16, no extra rounding is required). However, if *size* is
        ``(97, 57)``, and *format* is ``'rgb'``, then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
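        For example, a minimal sketch that overlays a solid red image above
        the preview and later removes it (1280 is a multiple of 32 and 720 a
        multiple of 16, so no padding is required)::
            import time
            import picamera
            with picamera.PiCamera(resolution=(1280, 720)) as camera:
                camera.start_preview()
                buf = bytes([255, 0, 0]) * (1280 * 720)
                overlay = camera.add_overlay(buf, size=(1280, 720),
                                             format='rgb', layer=3)
                time.sleep(5)
                camera.remove_overlay(overlay)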
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
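        For example, to record ten seconds of H.264 video to a file (a
        minimal sketch; the filename and duration are illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                camera.wait_recording(10)
                camera.stop_recording()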
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
        except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
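        For example, a minimal sketch that records two consecutive 5-second
        clips (the filenames are illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('clip1.h264')
                camera.wait_recording(5)
                camera.split_recording('clip2.h264')
                camera.wait_recording(5)
                camera.stop_recording()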
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
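        For example, a streaming application might request a key-frame when a
        new client connects (a minimal sketch using an in-memory stream)::
            import io
            import picamera
            with picamera.PiCamera() as camera:
                stream = io.BytesIO()
                camera.start_recording(stream, format='h264')
                camera.wait_recording(1)
                camera.request_key_frame()
                camera.wait_recording(1)
                camera.stop_recording()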
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
        It is recommended that this method be called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
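        For example, to capture a single JPEG image after a brief delay that
        lets the sensor settle (a minimal sketch; the filename is
        illustrative)::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                time.sleep(2)
                camera.capture('image.jpg')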
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
        prevents the preview from switching resolutions between captures, which
        significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
        prevents the preview from switching resolutions between captures, which
        significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
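        For example (a minimal sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                print(camera.revision)  # e.g. 'ov5647' or 'imx219'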
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
        |       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,  |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
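        For example, a minimal sketch that turns the LED off before a capture
        (this assumes RPi.GPIO is installed and the process has sufficient
        privileges, as noted below; the filename is illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.led = False
                camera.capture('dark.jpg')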
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
            (the GPIOs used to control the camera LED were re-routed to a GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
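        For example, a minimal sketch that prints the current frame's
        timestamp once a second during a ten second recording::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                for i in range(10):
                    camera.wait_recording(1)
                    if camera.frame is not None:
                        print(camera.frame.timestamp)
                camera.stop_recording()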
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
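For example, a minimal sketch requesting mode 4 (the binned, full
field-of-view mode on both module revisions; see :ref:`camera_modes`
for the full table)::
import picamera
with picamera.PiCamera(sensor_mode=4) as camera:
camera.capture('binned.jpg')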
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
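As a minimal sketch (assuming an active camera), the framerate can be
nudged while a recording is in progress::
import picamera
with picamera.PiCamera(framerate=30) as camera:
camera.start_recording('video.h264')
camera.framerate_delta = 0.5  # camera now runs at ~30.5fps
camera.wait_recording(5)
camera.stop_recording()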
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When
``True``, statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
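For example, a half-second exposure might be requested as follows (a
minimal sketch; the framerate is lowered first so that such a slow
shutter speed is permitted)::
import picamera
with picamera.PiCamera(framerate=2) as camera:
camera.shutter_speed = 500000
camera.iso = 800
camera.capture('dark.jpg')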
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
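For example, the gains can be fixed at their settled values as follows
(a minimal sketch; the sleep gives the automatic gain control time to
converge)::
import time
import picamera
with picamera.PiCamera() as camera:
camera.iso = 100
time.sleep(2)
camera.exposure_mode = 'off'
camera.capture('fixed_gains.jpg')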
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned as :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
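For example, the white balance can be fixed manually as follows (a
minimal sketch; the gain values shown are arbitrary)::
import picamera
with picamera.PiCamera() as camera:
camera.awb_mode = 'off'
camera.awb_gains = (1.5, 1.2)
camera.capture('fixed_wb.jpg')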
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'``     | *yuv*,         | *yuv* controls whether data is          |
|                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
|                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *u* and *v* default   |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
|                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
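For example, the five-parameter form of ``'solarize'`` from the table
above might be applied as follows (a minimal sketch; the parameter
values are arbitrary)::
import picamera
with picamera.PiCamera() as camera:
camera.image_effect = 'solarize'
camera.image_effect_params = (0, 128, 128, 128, 0)
camera.capture('solarized.jpg')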
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
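A minimal sketch of such a black and white capture::
import picamera
with picamera.PiCamera() as camera:
camera.color_effects = (128, 128)
camera.capture('monochrome.jpg')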
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and
rescaling. If rotation has been applied, the zoom rectangle is
effectively interpreted as ``(y, x, h, w)`` instead. The ``w`` and
``h`` values can alter the aspect ratio of the image: use equal values
for ``w`` and ``h`` to keep the aspect ratio unchanged.
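For example, a minimal sketch zooming into the central quarter of the
frame::
import picamera
with picamera.PiCamera() as camera:
camera.zoom = (0.25, 0.25, 0.5, 0.5)
camera.capture('zoomed.jpg')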
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
        non-ASCII characters, will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
            attribute is backward compatible, although the values themselves
            are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
|
close | Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks. | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
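# For example, docstring_values({'off': 0, 'auto': 1}) produces (with the
# default indent of 8 spaces applied to continuation lines):
#
#     * ``'off'``
#     * ``'auto'``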
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
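# Usage sketch: the two singletons above act as sentinels for the PiCamera
# constructor, requesting the sensor's full capabilities, e.g.:
#
#     camera = PiCamera(
#         resolution=PiCamera.MAX_RESOLUTION,
#         framerate=PiCamera.MAX_FRAMERATE)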
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
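    # The *_R mappings invert the tables above so that attribute getters can
    # translate MMAL enum values read back from the firmware into the string
    # names used by this API (e.g. _METER_MODES_R maps
    # mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT back to 'spot').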
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
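    # Construction sketch (assumes a Pi with the camera enabled): the
    # relatively expensive mode parameters can be supplied up front rather
    # than set attribute-by-attribute afterwards:
    #
    #     camera = PiCamera(resolution=(1280, 720), framerate=30,
    #                       sensor_mode=0, clock_mode='reset')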
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
                'The camera is already using port %d' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
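    # Behavior sketch: for output 'shot.jpg' with format=None,
    # _get_output_format() guesses 'image/jpeg' from the extension and
    # _get_image_format() strips the 'image/' prefix, yielding 'jpeg'.
    # Passing format='raw' resolves to the current raw_format (e.g. 'yuv').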
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
# MASKED: close function (lines 725-752)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
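    # Usage sketch (assumes an open PiCamera instance named ``camera``): a
    # windowed, semi-transparent preview instead of the opaque fullscreen
    # default:
    #
    #     camera.start_preview(alpha=200, fullscreen=False,
    #                          window=(0, 0, 640, 480))
    #     # ... later ...
    #     camera.stop_preview()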
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
        ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
        specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
        a multiple of 16, no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
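    # Usage sketch (assumes an open PiCamera instance named ``camera``):
    # overlaying a solid black 320x240 RGB buffer above the preview (320 and
    # 240 are already multiples of 32 and 16, so no padding is needed):
    #
    #     buf = b'\x00' * (320 * 240 * 3)       # solid black RGB image
    #     overlay = camera.add_overlay(buf, size=(320, 240), format='rgb',
    #                                  layer=3, alpha=128)
    #     # ... later ...
    #     camera.remove_overlay(overlay)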
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
        except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
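    # Usage sketch (assumes an open PiCamera instance named ``camera``): a
    # ten second H.264 recording:
    #
    #     camera.start_recording('video.h264')
    #     camera.wait_recording(10)   # also surfaces any encoder errors
    #     camera.stop_recording()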
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
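    # Usage sketch (assumes an open PiCamera instance named ``camera``):
    # rotating the recording across hourly files; split points require inline
    # SPS headers, which start_recording() enables by default:
    #
    #     camera.start_recording('part01.h264')
    #     for i in range(2, 5):
    #         camera.wait_recording(3600)
    #         camera.split_recording('part%02d.h264' % i)
    #     camera.stop_recording()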
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
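As a minimal sketch (assuming a camera at default settings), the
following captures a JPEG to a file, then a resized raw RGB capture to
an in-memory stream::
    import io
    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        time.sleep(2)
        # Format is guessed from the filename extension
        camera.capture('foo.jpg')
        # Raw RGB data, resized to 320x240, written to a stream
        stream = io.BytesIO()
        camera.capture(stream, format='rgb', resize=(320, 240))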
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
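For instance, a generator function can yield in-memory streams and
process each one after its capture completes (a sketch; ``process`` is
a hypothetical user-supplied function)::
    import io
    import picamera

    def outputs(count):
        for i in range(count):
            stream = io.BytesIO()
            yield stream
            # The capture has finished by the time the generator resumes
            stream.seek(0)
            process(stream)

    with picamera.PiCamera() as camera:
        camera.capture_sequence(outputs(10))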
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
|                                            | image2013-10-05 12:07:32.498539.jpg, ...   |       |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact that the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
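For example (a sketch; the tag values are purely illustrative)::
    import datetime
    camera.exif_tags['IFD0.Artist'] = 'Jane Doe'
    camera.exif_tags['EXIF.ISOSpeedRatings'] = 100
    camera.exif_tags['EXIF.DateTimeOriginal'] = datetime.datetime(2013, 10, 5, 12, 7)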
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
|       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,   |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
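For example, to switch the LED off (a minimal sketch; this assumes
RPi.GPIO is installed and the process has sufficient privileges)::
    import picamera

    with picamera.PiCamera() as camera:
        camera.led = False
        # ... capture without the tell-tale red light ...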
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
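For example, when the camera is constructed with ``clock_mode='raw'``,
the difference between this attribute and the current frame's timestamp
gives a rough measure of encoder latency (a sketch)::
    import picamera

    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(1)
        # Both values are microseconds since the last system boot
        print(camera.timestamp - camera.frame.timestamp)
        camera.stop_recording()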
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
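For example, to print the index of the frame currently being encoded
once a second during a recording (a minimal sketch)::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        for i in range(5):
            camera.wait_recording(1)
            print(camera.frame.index)
        camera.stop_recording()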
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
arguments are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
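For example, to force a particular mode at construction time (a sketch;
the mapping of mode numbers to sensor configurations is described in
:ref:`camera_modes`)::
    import picamera

    camera = picamera.PiCamera(sensor_mode=5)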
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional; adjustments as small as
1/256th of an fps are possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When
``True``, statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
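For example, to request a half-second exposure (a sketch; as noted
below, on later firmwares the framerate must first be lowered so that
the frame time can accommodate the exposure)::
    camera.framerate = 2
    camera.shutter_speed = 500000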
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
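For example, to apply the strongest compression (a sketch)::
    camera.drc_strength = 'high'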
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
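For example, to favour a low-noise image in good light (a sketch)::
    camera.iso = 100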
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
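For example, to weight the exposure toward the centre of the frame (a
sketch)::
    camera.meter_mode = 'spot'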
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
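        For example, to brighten the image by a full stop (a minimal
        sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.exposure_compensation = 6
                camera.capture('brighter.jpg')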
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
            first initialized. Therefore it is important to let them settle on
            higher values before disabling automatic gain control; otherwise,
            all frames captured will appear black.
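            A minimal sketch of this sequence follows (the two second sleep
            is an assumption; longer may be required for the gains to
            settle)::
                import time
                import picamera
                with picamera.PiCamera() as camera:
                    camera.iso = 800
                    time.sleep(2)
                    camera.exposure_mode = 'off'
                    camera.capture('fixed_gains.jpg')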
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
        are returned as :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
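        For example (a minimal sketch; the gain values shown are merely
        illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.awb_mode = 'off'
                camera.awb_gains = (1.5, 1.2)
                camera.capture('manual_awb.jpg')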
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
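        # Clamp/convert user-supplied values into the representations the
        # firmware expects ("8dot8" denotes an 8.8 fixed-point number)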
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
        |                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
        |                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *u* and *v* default   |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
        |                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
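        For example, to posterise the image with 8 quantization steps (a
        minimal sketch using values from the table above)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.image_effect = 'posterise'
                camera.image_effect_params = 8
                camera.capture('posterised.jpg')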
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
            except (TypeError, ValueError):
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
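        For example (a minimal sketch producing black and white output)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.color_effects = (128, 128)
                camera.capture('bw.jpg')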
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
        except (TypeError, ValueError):
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
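        For example (a minimal sketch; values are rounded down to the nearest
        multiple of 90)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.rotation = 90
                camera.capture('rotated.jpg')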
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
        except (TypeError, ValueError):
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
        The zoom is applied to the processed image, after rotation and
        rescaling. If rotation has been used, the zoom rectangle is composed
        as ``(y, x, h, w)`` instead. The values ``w`` and ``h`` can modify the
        aspect ratio of the image: use equal values for ``w`` and ``h`` if you
        want to preserve the aspect ratio.
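        For example, to select the central quarter of the frame (a minimal
        sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.zoom = (0.25, 0.25, 0.5, 0.5)
                camera.capture('zoomed.jpg')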
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
        If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
        Retrieves or sets whether the preview window appears full-screen.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
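        For example (a minimal sketch; the annotation appears on the preview
        and in captures and recordings alike)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.annotate_text = 'Hello world!'
                camera.capture('annotated.jpg')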
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
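        For example, to draw white text on a solid black background (a
        minimal sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.annotate_background = picamera.Color('black')
                camera.annotate_text = 'Hello world!'
                camera.capture('annotated.jpg')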
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
            attribute is backward compatible, although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
    def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
            raise exc
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
        # Must be done *after* the stereoscopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
        # Create a splitter component for the video port. This permits video
        # recordings and captures (with use_video_port=True) to occur
        # simultaneously (issue #26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
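        As a minimal sketch of this pattern (an illustration, not part of
        the original documentation)::
            import picamera
            camera = picamera.PiCamera()
            try:
                camera.capture('image.jpg')
            finally:
                camera.close()
        Alternatively, use the camera as a context manager (``with
        picamera.PiCamera() as camera:``) and :meth:`close` will be called
        automatically on exit.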
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
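        As a brief sketch (illustrative, not from the original
        documentation), a semi-transparent preview can be requested via the
        *alpha* option::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview(alpha=128)
                time.sleep(10)
                camera.stop_preview()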
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
        ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
        specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
        bytes, or 2,764,800 bytes (because 1280 is a multiple of 32 and 720 is
        a multiple of 16, no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
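        As an illustrative sketch (this assumes the Pillow library is
        available; the filename is hypothetical), an image can be padded to
        the required buffer size before being passed to this method::
            import picamera
            from PIL import Image
            camera = picamera.PiCamera()
            img = Image.open('overlay.png')
            # Pad the width up to a multiple of 32 and the height up to a
            # multiple of 16, as the renderer requires
            pad = Image.new('RGB', (
                ((img.size[0] + 31) // 32) * 32,
                ((img.size[1] + 15) // 16) * 16,
                ))
            pad.paste(img, (0, 0))
            o = camera.add_overlay(pad.tobytes(), size=img.size)
            # Move the overlay above the preview layer so it is visible
            o.layer = 3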
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
        ``write`` method, it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
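        As a minimal sketch (illustrative, not from the original
        documentation), a ten second H.264 recording might look like::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('my_video.h264')
                camera.wait_recording(10)
                camera.stop_recording()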
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
        except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
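        As an illustrative sketch (the filenames are hypothetical), a
        continuous recording can be split into consecutive ten second
        files::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('clip1.h264')
                camera.wait_recording(10)
                for i in range(2, 6):
                    camera.split_recording('clip%d.h264' % i)
                    camera.wait_recording(10)
                camera.stop_recording()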
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
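        As a brief sketch (``stream`` is a hypothetical file-like object
        receiving the recording), a streaming server might request a
        key-frame whenever a new client connects::
            camera.start_recording(stream, format='h264')
            # ... later, when a new client joins the stream ...
            camera.request_key_frame()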
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
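        As a sketch of the recommended pattern (illustrative only), polling
        the encoder once a second surfaces any recording error promptly::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                try:
                    for _ in range(60):
                        camera.wait_recording(1)
                finally:
                    camera.stop_recording()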
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
        ``write`` method, it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
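        As a minimal sketch (illustrative, not from the original
        documentation)::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                # Give the sensor a couple of seconds to settle
                time.sleep(2)
                camera.capture('image.jpg')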
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
        |       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,  |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
        |       | FlashEnergy, SpatialFrequencyResponse,                     |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
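        As a one-line sketch (requires the RPi.GPIO package and sufficient
        privileges; illustrative only)::
            camera.led = False  # turn the camera's LED off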
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
        camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
        arguments are required to ensure correct operation on older firmware
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When
``True``, statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
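As an illustrative sketch (the file name is arbitrary)::
    import picamera

    with picamera.PiCamera() as camera:
        camera.still_stats = True    # compute stats from the capture itself
        camera.capture('still.jpg')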
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
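For example::
    camera.brightness = 70  # brighten the image (50 is the default)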
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
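As an illustrative long-exposure sketch (the values are arbitrary; note
that the framerate bounds the shutter speed, as described below)::
    import picamera
    from fractions import Fraction

    with picamera.PiCamera(framerate=Fraction(1, 2)) as camera:
        camera.shutter_speed = 2000000  # 2s, i.e. 2,000,000µs
        camera.iso = 800
        camera.capture('night.jpg')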
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
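For example::
    camera.iso = 100  # bright conditions, low noise
    camera.iso = 800  # low-light conditions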
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
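For example::
    camera.exposure_compensation = 6   # +1 stop (6 increments of 1/6th)
    camera.exposure_compensation = -3  # -1/2 stop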
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control, otherwise all
frames captured will appear black.
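As an illustrative sketch of this settle-then-fix pattern (the 2 second
delay is a rough assumption, not a guaranteed convergence time)::
    import time
    import picamera

    with picamera.PiCamera() as camera:
        time.sleep(2)  # let the AGC settle on reasonable gains
        camera.exposure_mode = 'off'  # fix analog_gain and digital_gain
        camera.capture('fixed_exposure.jpg')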
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
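As an illustrative sketch (the gain values are arbitrary)::
    import picamera

    with picamera.PiCamera() as camera:
        camera.awb_mode = 'off'
        camera.awb_gains = (1.5, 1.3)  # (red, blue)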
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
| | *x0*, *y0*, | processed as RGB (0) or YUV(1). Input |
| | *y1*, *y2* | values from 0 to *x0* - 1 are remapped |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *u* and *v* default |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
| | *r*, *b* | 1.0. |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicate no U/V effect. |
+--------------------+----------------+-----------------------------------------+
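As an illustrative sketch (the parameter values are arbitrary), the
``'colorbalance'`` effect can be configured with its three-parameter
form from the table above::
    import picamera

    with picamera.PiCamera() as camera:
        camera.image_effect = 'colorbalance'
        camera.image_effect_params = (0.0, 1.2, 1.2)  # lens, r, b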
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
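For example::
    camera.color_effects = (128, 128)  # black and white
    camera.color_effects = None        # restore normal color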
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
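For example, flipping both axes (equivalent to a 180° rotation)::
    camera.hflip = True
    camera.vflip = True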
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and
rescaling. If a rotation has been applied, the zoom is interpreted as
``(y, x, h, w)`` instead. The ``w`` and ``h`` values can modify the
aspect ratio of the image: use equal values for ``w`` and ``h`` to
preserve the aspect ratio.
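For example, to select the central quarter of the frame::
    camera.zoom = (0.25, 0.25, 0.5, 0.5)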
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
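As an illustrative sketch (the text and file name are arbitrary)::
    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_text = 'Hello world!'
        camera.capture('annotated.jpg')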
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute are backward compatible although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
|
stop_preview | Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing. | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
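# A quick sketch of what docstring_values() produces (hypothetical input;
# the keys come out as a reST bullet list ordered by their mapped values):
#
#     docstring_values({'b': 2, 'a': 1})
#     # -> "* ``'a'``\n        * ``'b'``"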
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
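    # Illustrative construction (added here): resolution and framerate are
    # cheapest to set up front rather than attribute-by-attribute, e.g.:
    #
    #     import picamera
    #     with picamera.PiCamera(resolution=(1280, 720), framerate=30) as camera:
    #         pass  # ... use the camera ...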
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
                'The camera is already using port %d' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
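    # The guess above rests on the stdlib mimetypes database (plus the
    # h264/mjpeg types registered in __init__). For instance:
    #
    #     mimetypes.guess_type('capture.jpg', strict=False)
    #     # -> ('image/jpeg', None)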
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
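    # Illustrative usage (added here; assumes an open PiCamera instance
    # named ``camera``): show a semi-transparent preview for a couple of
    # seconds, then reveal the display again:
    #
    #     import time
    #     camera.start_preview(alpha=128)
    #     time.sleep(2)
    #     camera.stop_preview()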
    def stop_preview(self):
        """
        Hides the preview overlay.
        If :meth:`start_preview` has previously been called, this method shuts
        down the preview display which generally results in the underlying
        display becoming visible again. If a preview is not currently running,
        no exception is raised - the method will simply do nothing.
        """
        self._check_camera_open()
        self._preview.close()
        self._preview = PiNullSink(
            self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
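    # Illustrative sizing (added here; assumes an open PiCamera instance
    # named ``camera``): a 97x57 RGB overlay must be supplied in a buffer
    # padded to 128x64 (width rounded up to a multiple of 32, height to a
    # multiple of 16):
    #
    #     buf = bytearray(128 * 64 * 3)  # 24,576 bytes; pixels beyond
    #                                    # column 97 / row 57 are ignored
    #     overlay = camera.add_overlay(buf, size=(97, 57), format='rgb')
    #     overlay.layer = 3  # lift it above the preview (layer 2)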
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
        except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
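    # Illustrative usage (added here; assumes an open PiCamera instance
    # named ``camera``): ten seconds of H.264 at a fixed bitrate:
    #
    #     camera.start_recording('video.h264', bitrate=10000000)
    #     camera.wait_recording(10)
    #     camera.stop_recording()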
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
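    # Illustrative usage (added here; assumes an open PiCamera instance
    # named ``camera``): roll the recording over to a new file every 60
    # seconds without stopping the encoder:
    #
    #     camera.start_recording('1.h264')
    #     for i in range(2, 5):
    #         camera.wait_recording(60)
    #         camera.split_recording('%d.h264' % i)
    #     camera.stop_recording()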
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
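    # Illustrative usage (added here; ``on_client_connected`` is a
    # hypothetical callback in a streaming server): request a fresh
    # key-frame whenever a new client joins so it can start decoding
    # promptly:
    #
    #     def on_client_connected(camera):
    #         camera.request_key_frame()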
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
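    # Illustrative usage (added here; assumes an open PiCamera instance
    # named ``camera``): wait_recording() is where encoder errors surface,
    # so prefer it over time.sleep() while a recording runs:
    #
    #     camera.start_recording('video.h264')
    #     try:
    #         camera.wait_recording(30)  # raises if the encoder failed
    #     finally:
    #         camera.stop_recording()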
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
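    # Illustrative usage (added here; assumes an open PiCamera instance
    # named ``camera``): a high-quality JPEG still with the Exif thumbnail
    # disabled:
    #
    #     camera.capture('still.jpg', quality=90, thumbnail=None)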
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
|                                            | image2013-10-05 12:07:32.498539.jpg, ...   |       |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact that the default contains spaces) it is
strongly recommended that you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple, although it breaks the edge case of
# non-UTF-8 encoded bytes strings on non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
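For example, to back-date a capture with a
:class:`~datetime.datetime` value (the date below is purely
illustrative)::
    import datetime
    camera.exif_tags['EXIF.DateTimeOriginal'] = \
        datetime.datetime(2013, 10, 5, 12, 7, 12)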
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
|       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,   |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
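For example, to keep the LED off during a capture (this sketch
assumes RPi.GPIO is installed and the process has sufficient
privileges; note that the firmware may still override the setting,
as described in the warning below)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.led = False
        camera.capture('unlit.jpg')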
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to a
GPIO expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
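For example, a sketch comparing the firmware clock against frame
timestamps under ``'raw'`` clock mode (filename and timing are
illustrative)::
    import picamera
    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('clock.h264')
        camera.wait_recording(1)
        # In 'raw' mode both values count microseconds since boot
        if camera.frame.timestamp is not None:
            print(camera.timestamp - camera.frame.timestamp)
        camera.stop_recording()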
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
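For example, a sketch that reports frame information once per
second while recording (filename and duration are illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('frames.h264')
        for i in range(5):
            camera.wait_recording(1)
            f = camera.frame
            if f is not None:  # see the note above
                print(f.index, f.timestamp)
        camera.stop_recording()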
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
arguments are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used, which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
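For example, a sketch requesting a specific mode at construction
(mode 4 is chosen purely for illustration; consult
:ref:`camera_modes` for the meaning of each mode on your module)::
    import picamera
    with picamera.PiCamera(sensor_mode=4) as camera:
        camera.capture('mode4.jpg')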
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerates deltas can be fractional with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When `True`,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
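A minimal sketch enabling the statistics pass for a single capture
(the filename is illustrative)::
    camera.still_stats = True
    camera.capture('stats.jpg')  # statistics derived from this image itself
    camera.still_stats = False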
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
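For example, a sketch of a 6 second exposure; on later firmwares
the :attr:`framerate` must be lowered first to permit such slow
shutter speeds (see the notes below; all values here are
illustrative)::
    from fractions import Fraction
    camera.framerate = Fraction(1, 6)  # allow shutter speeds up to 6s
    camera.shutter_speed = 6000000     # 6,000,000µs = 6s
    camera.iso = 800
    camera.capture('night.jpg')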
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
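For example, given that each increment is 1/6th of a stop::
    camera.exposure_compensation = 6    # +1 stop (brighter)
    camera.exposure_compensation = -12  # -2 stops (darker)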
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
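A minimal sketch of this settling procedure (the delay and ISO
value are illustrative)::
    import time
    camera.iso = 100       # influence the gains before fixing them
    time.sleep(2)          # allow analog and digital gain to settle
    camera.exposure_mode = 'off'
    camera.capture('fixed_gain.jpg')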
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned as :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
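For example, to disable automatic white balance and lock in manually
chosen gains (the gain values here are purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.awb_mode = 'off'
        camera.awb_gains = (1.5, 1.2)  # illustrative (red, blue) gains
        camera.capture('fixed_wb.jpg')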
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
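For example (the effect and filename are chosen purely for illustration)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.image_effect = 'sketch'
        camera.capture('sketch.jpg')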
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
|                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
|                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *u* and *v* default   |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
|                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
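For example, to apply the ``'solarize'`` effect with an explicit set of
parameters (the parameter values here are purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.image_effect = 'solarize'
        # Four values select the (x0, y0, y1, y2) combination above
        camera.image_effect_params = (128, 128, 128, 0)
        camera.capture('solarized.jpg')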
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
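For example (the filename is illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.color_effects = (128, 128)  # black and white
        camera.capture('bw.jpg')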
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
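For example (the filename is illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.rotation = 180
        camera.capture('rotated.jpg')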
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The `zoom` is applied to the processed image, after rotation and rescale.
If rotation has been used, zoom is composed of ``(y, x, h, w)`` instead.
The values `w` and `h` can modify the aspect ratio of the image: use equal
values for `w` and `h` if you want to preserve the aspect ratio.
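For example, to capture only the central quarter of the frame (the
filename is illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.zoom = (0.25, 0.25, 0.5, 0.5)
        camera.capture('center.jpg')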
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
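For example (the text and filename are illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_text = 'Hello world!'
        camera.capture('annotated.jpg')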
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
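For example (the size, text, and filename are illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_text_size = 48
        camera.annotate_text = 'Large annotation'
        camera.capture('large_text.jpg')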
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute is backward compatible although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
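For example, to display the preview for ten seconds (the duration is
illustrative)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        time.sleep(10)
        camera.stop_preview()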
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
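For example, a static overlay can be constructed from a numpy array (this
sketch assumes numpy is installed; the size, pixel values, and layer are
illustrative)::

    import picamera
    import numpy as np

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # 320x240 needs no padding: 320 is a multiple of 32, 240 of 16
        buf = np.zeros((240, 320, 3), dtype=np.uint8)
        buf[96:144, 128:192, :] = 0xff  # white rectangle near the centre
        overlay = camera.add_overlay(buf.tobytes(), size=(320, 240), layer=3)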
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method, it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
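As a minimal sketch (hypothetical filename; *quality* 23 is simply a
mid-range value from the 20-25 band described above)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264', quality=23)
        camera.wait_recording(10)  # record for roughly ten seconds
        camera.stop_recording()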
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
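As a minimal sketch (hypothetical filenames), a single recording can
be split into consecutive five-second files::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('1.h264')
        camera.wait_recording(5)
        for i in range(2, 11):
            camera.split_recording('%d.h264' % i)
            camera.wait_recording(5)
        camera.stop_recording()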
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
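As a minimal sketch (the five-second intervals are arbitrary), a
key-frame might be requested mid-recording, for example when a new
client attaches to a network stream::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('stream.h264')
        camera.wait_recording(5)
        camera.request_key_frame()  # ask for an I-frame as soon as possible
        camera.wait_recording(5)
        camera.stop_recording()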
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for *timeout* seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
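As a minimal sketch (hypothetical filename), polling once per second
for errors during a minute-long recording::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        try:
            for _ in range(60):
                camera.wait_recording(1)  # raises if recording has failed
        finally:
            camera.stop_recording()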
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method, it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
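As a minimal sketch (hypothetical filename; the two-second sleep
simply gives the camera's automatic gain control time to settle)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        time.sleep(2)
        camera.capture('photo.jpg', quality=85)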
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
|                                            | image2013-10-05 12:07:32.498539.jpg, ...   |       |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip, |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpatialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
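As a minimal sketch (requires RPi.GPIO and, typically, root
privileges as noted below)::

    import picamera

    camera = picamera.PiCamera()
    camera.led = False  # turn the camera module's LED off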
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to the GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
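As a minimal sketch (the attributes shown are fields of the
:class:`PiVideoFrame` tuple)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(1)
        f = camera.frame
        if f is not None:  # encoder may not have produced a frame yet
            print(f.index, f.frame_type, f.timestamp)
        camera.stop_recording()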
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
arguments are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
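As a small illustration (mode 4 is an arbitrary example), the mode is
therefore normally selected at construction time::

    import picamera

    # Request a fixed sensor mode rather than the automatic mode 0
    camera = picamera.PiCamera(sensor_mode=4)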
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
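As a minimal sketch (hypothetical filename), with ``'raw'`` clock mode
the :attr:`timestamp` property and the frame timestamps share a
timebase and are directly comparable::

    import picamera

    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(1)
        print(camera.timestamp, camera.frame.timestamp)
        camera.stop_recording()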
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When `True`,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
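For example, a minimal sketch that enables the statistics pass for fast
captures shortly after startup (the filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.still_stats = True
        camera.capture('still.jpg')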
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
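For example, a sketch requesting a (roughly) 0.1 second exposure; the
framerate is lowered first so the requested shutter time is not clamped
(the filename is illustrative)::
    import picamera
    with picamera.PiCamera(framerate=5) as camera:
        camera.shutter_speed = 100000  # microseconds
        camera.capture('slow.jpg')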
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
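For example, a sketch that reads back the automatically selected shutter
speed (the two second sleep is an illustrative settling delay)::
    import time
    import picamera
    with picamera.PiCamera() as camera:
        time.sleep(2)  # let the auto-exposure algorithm settle
        print('Exposure: %d microseconds' % camera.exposure_speed)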
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
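For example, a minimal sketch for a bright, sunny scene (the filename is
illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.iso = 100  # low sensitivity for bright conditions
        camera.capture('daylight.jpg')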
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
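For example, a minimal sketch that brightens captures by a full stop (the
filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.exposure_compensation = 6  # 6 * 1/6 stop = +1 stop
        camera.capture('brighter.jpg')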
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control, otherwise all
frames captured will appear black.
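For example, a sketch of the settle-then-fix pattern described above (the
two second sleep is an illustrative settling delay)::
    import time
    import picamera
    with picamera.PiCamera() as camera:
        camera.iso = 100
        time.sleep(2)  # let the gains settle on sensible values
        camera.exposure_mode = 'off'
        camera.capture('fixed_gain.jpg')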
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
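For example, a sketch that locks the white balance at whatever gains the
camera has converged on (the sleep is an illustrative settling delay)::
    import time
    import picamera
    with picamera.PiCamera() as camera:
        time.sleep(2)  # let AWB converge
        gains = camera.awb_gains
        camera.awb_mode = 'off'
        camera.awb_gains = gains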
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
|                    | *x0*, *y0*,    | processed as RGB (0) or YUV(1). Input   |
|                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *u* and *v* default   |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
|                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
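For example, a sketch applying ``'solarize'`` with the full five parameter
form (the parameter values are illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.image_effect = 'solarize'
        # (yuv, x0, y0, y1, y2)
        camera.image_effect_params = (0, 128, 128, 128, 0)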
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
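For example, a minimal sketch producing black and white output (the
filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.color_effects = (128, 128)  # fixed chroma = greyscale
        camera.capture('bw.jpg')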
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and rescaling.
If a rotation has been applied, the zoom rectangle is interpreted as
``(y, x, h, w)`` instead. The values ``w`` and ``h`` can modify the aspect
ratio of the image: use equal values for ``w`` and ``h`` to keep the
aspect ratio unchanged.
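For example, a minimal sketch that captures only the central quarter of
the frame (the filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.zoom = (0.25, 0.25, 0.5, 0.5)
        camera.capture('center.jpg')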
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
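For example, a sketch that stamps the current time onto a capture (the
format string and filename are illustrative)::
    import datetime
    import picamera
    with picamera.PiCamera() as camera:
        now = datetime.datetime.now()
        camera.annotate_text = now.strftime('%Y-%m-%d %H:%M:%S')
        camera.capture('stamped.jpg')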
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute is backward compatible although the values themselves
are not.
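For example, a minimal sketch that renders the annotation text on a black
background for legibility::
    import picamera
    with picamera.PiCamera() as camera:
        camera.annotate_background = picamera.Color('black')
        camera.annotate_text = 'Hello world'
        camera.capture('annotated.jpg')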
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
_disable_camera | An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
        # Must be done *after* stereoscopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
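        For example, a sketch showing a semi-transparent preview for ten
        seconds (the alpha value and delay are illustrative)::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview(alpha=128)
                time.sleep(10)
                camera.stop_preview()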
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
        ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
        specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
        bytes, or 2,764,800 bytes (because 1280 is a multiple of 32 and 720 is
        a multiple of 16, no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
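        For example, a minimal sketch displaying a solid red overlay above
        the preview (the size and colour are illustrative; 320 and 240 are
        already multiples of 32 and 16 respectively, so no padding
        arithmetic is needed)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                buf = bytes((255, 0, 0)) * (320 * 240)
                overlay = camera.add_overlay(buf, size=(320, 240), format='rgb')
                overlay.layer = 3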
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
if not overlay in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
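        For example, a minimal sketch recording ten seconds of H.264 video
        (the filename, quality, and duration are illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264', quality=23)
                camera.wait_recording(10)
                camera.stop_recording()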
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
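        For example, a sketch splitting a recording across two consecutive
        files (the filenames and durations are illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('before.h264')
                camera.wait_recording(5)
                camera.split_recording('after.h264')
                camera.wait_recording(5)
                camera.stop_recording()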
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
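        For example, a sketch requesting a key-frame once per second during
        a short recording (the duration and interval are illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('stream.h264')
                for i in range(10):
                    camera.wait_recording(1)
                    camera.request_key_frame()
                camera.stop_recording()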
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
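        For example, a sketch recording for a minute while polling for
        errors once per second (the durations are illustrative)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                try:
                    for i in range(60):
                        camera.wait_recording(1)
                finally:
                    camera.stop_recording()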
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
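        For example, a minimal sketch capturing a single JPEG after giving
        the sensor a moment to settle (the delay and filename are
        illustrative)::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                time.sleep(2)
                camera.capture('image.jpg')
                camera.stop_preview()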
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
        +--------------------------------------------+--------------------------------------------+-------+
        | *output* Value                             | Filenames                                  | Notes |
        +============================================+============================================+=======+
        | ``'image{counter}.jpg'``                   | image1.jpg, image2.jpg, image3.jpg, ...    |       |
        +--------------------------------------------+--------------------------------------------+-------+
        | ``'image{counter:02d}.jpg'``               | image01.jpg, image02.jpg, image03.jpg, ... |       |
        +--------------------------------------------+--------------------------------------------+-------+
        | ``'image{timestamp}.jpg'``                 | image2013-10-05 12:07:12.346743.jpg,       | (1)   |
        |                                            | image2013-10-05 12:07:32.498539, ...       |       |
        +--------------------------------------------+--------------------------------------------+-------+
        | ``'image{timestamp:%H-%M-%S-%f}.jpg'``     | image12-10-02-561527.jpg,                  |       |
        |                                            | image12-10-14-905398.jpg                   |       |
        +--------------------------------------------+--------------------------------------------+-------+
        | ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg,            | (2)   |
        |                                            | 121014-003.jpg, ...                        |       |
        +--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime, integer, or float values, all of which
will be converted to appropriate ASCII strings (datetime values are
formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
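For example, a sketch (assuming ``camera`` is an open :class:`PiCamera`;
the tag values are illustrative only)::
import datetime
camera.exif_tags['EXIF.DateTimeOriginal'] = datetime.datetime(2013, 1, 1, 12, 0, 0)
camera.exif_tags['EXIF.FNumber'] = 2.8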
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip, |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
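For example, a sketch turning the LED off (requires RPi.GPIO and
suitable privileges)::
camera.led = False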
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
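For example, a sketch comparing a frame timestamp against the camera
clock (the two are comparable only when ``clock_mode='raw'``)::
camera = PiCamera(clock_mode='raw')
camera.start_recording('video.h264')
camera.wait_recording(2)
print(camera.frame.timestamp, camera.timestamp)
camera.stop_recording()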
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
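For example, a sketch that logs frame numbers once per second while a
recording is active (assumes ``camera`` is an open :class:`PiCamera`)::
camera.start_recording('video.h264')
for i in range(10):
    camera.wait_recording(1)
    if camera.frame is not None:
        print(camera.frame.index, camera.frame.timestamp)
camera.stop_recording()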
""")
# MASKED: _disable_camera function (lines 1961-1968)
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
arguments are required to ensure correct operation on older firmwares
(specifically, that we don't try to set the sensor mode when both the
old and new modes are 0, i.e. automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
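For example (a sketch; on both the V1 and V2 modules, mode 4 selects a
2x2 binned, full field-of-view mode)::
camera = PiCamera(sensor_mode=4)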
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When `True`,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
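For example, a sketch that calculates statistics from the still frame
itself::
camera.still_stats = True
camera.capture('image.jpg')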
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
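For example, a sketch of a long exposure (the framerate is lowered
first so that the 0.5s exposure fits within one frame period)::
camera.framerate = 1
camera.shutter_speed = 500000
camera.capture('long_exposure.jpg')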
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
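For example, a sketch that fixes the sensitivity before locking the
gains (the sleep gives the gains time to settle)::
import time
camera.iso = 100
time.sleep(2)
camera.exposure_mode = 'off'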
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
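For example (a sketch)::
camera.exposure_mode = 'night'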
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
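For example, a sketch that fixes the white balance manually (the gain
values are illustrative only)::
camera.awb_mode = 'off'
camera.awb_gains = (1.5, 1.3)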
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
| | *x0*, *y0*, | processed as RGB (0) or YUV (1). Input |
| | *y1*, *y2* | values from 0 to *x0* - 1 are remapped |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above, but *u* and *v* default |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above, but *g* also defaults |
| | *r*, *b* | to 1.0. |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
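For example, to posterise the camera's output with 8 quantization steps
(an illustrative sketch; the parameters must be set *after* selecting
the effect, since changing the effect resets its parameters)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.image_effect = 'posterise'
        camera.image_effect_params = (8,)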
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
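A minimal sketch of the black and white setting described above::

    import picamera

    with picamera.PiCamera() as camera:
        camera.color_effects = (128, 128)  # fix U/V at 128: black and white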
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
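For example, to display the camera's output upside-down::

    import picamera

    with picamera.PiCamera() as camera:
        camera.rotation = 180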
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
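For example, to mirror the camera's output both horizontally and
vertically::

    import picamera

    with picamera.PiCamera() as camera:
        camera.hflip = True
        camera.vflip = True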
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and rescale.
If rotation has been used, the zoom is composed as ``(y, x, h, w)``
instead. The values *w* and *h* can modify the aspect ratio of the
image: use equal values for *w* and *h* if you want to preserve the
aspect ratio.
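For example, to zoom in on the central quarter of the frame while
preserving the aspect ratio::

    import picamera

    with picamera.PiCamera() as camera:
        camera.zoom = (0.25, 0.25, 0.5, 0.5)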
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
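For example, to stamp a caption onto a capture (the filename here is
purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_text = 'Hello world!'
        camera.capture('annotated.jpg')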
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
    camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute is backward compatible although the values themselves
are not.
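For example, to draw the annotation text on a black background::

    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_background = picamera.Color('black')
        camera.annotate_text = 'Recording'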
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
| def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable() | 1,961 | 1,968 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
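For example, a side-by-side stereoscopic capture might look as follows
(a sketch; this requires a compute module with two attached cameras, and
the output filename is illustrative)::

    import picamera

    with picamera.PiCamera(stereo_mode='side-by-side') as camera:
        camera.capture('stereo.jpg')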
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
    camera = PiCamera()
    try:
        # do something with the camera
        pass
    finally:
        camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
    with PiCamera() as camera:
        # do something with the camera
        pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
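For example, to display the preview for ten seconds and then restore the
underlying display::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        time.sleep(10)
        camera.stop_preview()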
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
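For example, the following sketch overlays a solid red frame above the
preview (the buffer padding follows the rounding rules described above,
and layer 3 places the overlay above the default preview layer)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # Pad the camera's resolution up to multiples of 32 (width)
        # and 16 (height) to satisfy the overlay buffer requirements
        w = ((camera.resolution.width + 31) // 32) * 32
        h = ((camera.resolution.height + 15) // 16) * 16
        # One red RGB pixel, repeated across the whole padded frame
        buf = b'\xff\x00\x00' * (w * h)
        overlay = camera.add_overlay(buf, format='rgb', layer=3)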
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
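For example, to record ten seconds of H.264 video (quality 23 falls in
the reasonable 20-25 range noted above; the filename is illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264', quality=23)
        camera.wait_recording(10)
        camera.stop_recording()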
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
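For example, a minimal sketch (filenames are arbitrary) that splits a
recording into a series of one-minute segments::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('segment01.h264')
        for i in range(2, 5):
            # Record a minute, then continue in the next file
            camera.wait_recording(60)
            camera.split_recording('segment%02d.h264' % i)
        camera.wait_recording(60)
        camera.stop_recording()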
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
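For example, a minimal sketch that requests a fresh key-frame once a
second while recording (useful, for instance, when clients may join a
live stream mid-recording)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        for i in range(10):
            camera.wait_recording(1)
            camera.request_key_frame()
        camera.stop_recording()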
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
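For example, a minimal sketch that records for thirty seconds while
polling for encoder errors once a second::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        for i in range(30):
            # Raises if an error occurred during the last second
            camera.wait_recording(1)
        camera.stop_recording()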
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
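For example, a minimal sketch (assuming numpy is installed; 640x480 is
used because both dimensions already satisfy the camera's block-size
rounding) that captures directly into a buffer via the buffer
protocol::
    import numpy as np
    import picamera
    with picamera.PiCamera(resolution=(640, 480)) as camera:
        buf = np.empty((480, 640, 3), dtype=np.uint8)
        camera.capture(buf, format='rgb')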
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact that the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip, |
| | StripByteCounts, XResolution, YResolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YCbCrCoefficients, |
| | YCbCrSubSampling, YCbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpatialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
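For example, a minimal sketch (assuming RPi.GPIO is installed and the
script is run with sufficient privileges) that switches the LED off::
    import picamera
    with picamera.PiCamera() as camera:
        camera.led = False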
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to the GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
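For example, a minimal sketch (assuming ``clock_mode='raw'``) that
compares the camera's clock against the timestamp of the most recently
recorded frame::
    import picamera
    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(1)
        if camera.frame.timestamp is not None:
            print(camera.timestamp - camera.frame.timestamp)
        camera.stop_recording()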
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
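For example, a minimal sketch that prints the index of the frame
currently being recorded, once a second::
    import picamera
    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        for i in range(10):
            camera.wait_recording(1)
            print(camera.frame.index)
        camera.stop_recording()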
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode* arguments
are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When ``True``,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
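For example, a minimal sketch that enables the statistics pass so that
a capture taken shortly after start-up is exposed from the captured
image itself rather than a preceding preview frame::
    import picamera
    with picamera.PiCamera() as camera:
        camera.still_stats = True
        camera.capture('startup.jpg')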
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
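For example, a minimal sketch (for a dimly lit scene) that fixes a 50ms
exposure; the framerate is set to 20fps so that each frame is long
enough to accommodate the requested shutter speed::
    import picamera
    with picamera.PiCamera(framerate=20) as camera:
        camera.shutter_speed = 50000  # microseconds (50ms)
        camera.capture('long_exposure.jpg')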
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
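        For example, an illustrative sketch of reading the automatically
        selected speed::
            camera.shutter_speed = 0  # let auto-exposure pick the speed
            print(camera.exposure_speed)  # actual speed in microseconds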
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
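        For example, an illustrative sketch converting both gains to
        floats for display::
            print(float(camera.analog_gain), float(camera.digital_gain))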
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
        When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
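        For example, an illustrative sketch (the strength chosen is
        arbitrary)::
            camera.drc_strength = 'medium'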
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
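        For example, an illustrative sketch (the value chosen is
        arbitrary) requesting a high sensitivity for low-light
        conditions::
            camera.iso = 800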
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
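        For example, an illustrative sketch applying the arithmetic
        above::
            camera.exposure_compensation = 6  # 6 * 1/6 stop = +1 stop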
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
            first initialized. Therefore, it is important to let them settle
            on higher values before disabling automatic gain control;
            otherwise, all frames captured will appear black.
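        For example, a minimal illustrative sketch of fixing the gains
        (the settling delay is arbitrary)::
            from time import sleep
            sleep(2)  # let the automatic gain control settle
            camera.exposure_mode = 'off'  # gains are now fixed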
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
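        For example, an illustrative sketch of manual white balance (the
        gain values are arbitrary, but lie in the typical range)::
            camera.awb_mode = 'off'
            camera.awb_gains = (1.5, 1.4)  # (red, blue)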
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
        The effects which have parameters, and the combinations those
        parameters can take, are as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
        | ``'solarize'``     | *yuv*,         | *yuv* controls whether data is          |
        |                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
        |                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
        |                    |                | linearly onto the range 0 to *y0*.      |
        |                    |                | Values from *x0* to 255 are remapped    |
        |                    |                | linearly onto the range *y1* to *y2*.   |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *u* and *v* default   |
        |                    | *r*, *g*, *b*  | to 0.                                   |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *g* also defaults     |
        |                    | *r*, *b*       | to 1.0.                                 |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
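        For example, an illustrative sketch configuring the
        ``'colorpoint'`` effect (the quadrant chosen is arbitrary)::
            camera.image_effect = 'colorpoint'
            camera.image_effect_params = (1,)  # keep red/yellow chroma only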
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
        For example, to make the image black and white, set the value to
        ``(128, 128)``. The default value is ``None``.
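        For example, an illustrative sketch of the black and white case::
            camera.color_effects = (128, 128)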
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
        except (TypeError, ValueError):
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
        The zoom is applied to the processed image, after rotation and
        rescaling. If rotation has been applied, the zoom is interpreted as
        ``(y, x, h, w)`` instead. The values ``w`` and ``h`` can modify the
        aspect ratio of the image: use equal values for ``w`` and ``h`` if
        you want to preserve the aspect ratio.
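        For example, an illustrative sketch selecting the central quarter
        of the frame::
            camera.zoom = (0.25, 0.25, 0.5, 0.5)  # (x, y, w, h)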
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
        If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
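        For example, an illustrative sketch (the text is arbitrary; it
        must be ASCII and at most 255 characters)::
            camera.annotate_text = 'Hello world!'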
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
            color black. The "truthiness" of the values returned by the
            attribute is backward compatible, although the values themselves
            are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
|
_configure_camera | An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_mode* and *new_mode* arguments
are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic). | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
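    For example, a minimal illustrative sketch of construction with
    explicit initial values (the values shown are arbitrary)::
        import picamera
        camera = picamera.PiCamera(resolution=(1280, 720), framerate=30)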
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
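        For example, a minimal sketch of explicit cleanup (using the
        camera as a context manager, as in the other examples in this
        documentation, is equivalent and usually preferable)::

            import picamera

            camera = picamera.PiCamera()
            try:
                camera.capture('image.jpg')
            finally:
                camera.close()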
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
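        For example, a minimal sketch that shows a semi-transparent
        preview for a few seconds (the *alpha* option is passed through
        to the underlying renderer)::

            import time
            import picamera

            with picamera.PiCamera() as camera:
                camera.start_preview(alpha=128)  # 0=invisible, 255=opaque
                time.sleep(5)
                camera.stop_preview()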
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
        ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
        specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
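        For example, a sketch that places a solid mid-grey overlay above
        the preview (1280 and 720 are already multiples of 32 and 16, so
        no padding arithmetic is required)::

            import time
            import picamera

            with picamera.PiCamera(resolution=(1280, 720)) as camera:
                camera.start_preview()
                buf = b'\\x80\\x80\\x80' * (1280 * 720)  # one RGB pixel, repeated
                overlay = camera.add_overlay(
                    buf, size=(1280, 720), format='rgb', layer=3)
                time.sleep(5)
                camera.remove_overlay(overlay)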
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
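        For example, a minimal sketch that records ten seconds of H.264
        video to a file::

            import picamera

            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                camera.wait_recording(10)
                camera.stop_recording()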
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
        except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
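        For example, a sketch that splits a continuous recording into
        three roughly ten-second segments::

            import picamera

            with picamera.PiCamera() as camera:
                camera.start_recording('segment01.h264')
                for i in range(2, 4):
                    camera.wait_recording(10)
                    camera.split_recording('segment%02d.h264' % i)
                camera.wait_recording(10)
                camera.stop_recording()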
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
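        For example, a sketch that requests a key-frame once per second
        while recording (useful if a downstream consumer may join the
        stream mid-way)::

            import picamera

            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                for _ in range(10):
                    camera.wait_recording(1)
                    camera.request_key_frame()
                camera.stop_recording()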
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
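        For example, a sketch of a one-minute recording that polls the
        encoder for errors once per second::

            import picamera

            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                try:
                    for _ in range(60):
                        camera.wait_recording(1)  # raises if recording failed
                finally:
                    camera.stop_recording()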
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
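        For example, a minimal sketch that captures a single JPEG after
        a short delay to let the camera's exposure and white balance
        settle::

            import time
            import picamera

            with picamera.PiCamera() as camera:
                camera.start_preview()
                time.sleep(2)
                camera.capture('image.jpg', quality=90)
                camera.stop_preview()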
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
        |       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,   |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
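        For example, a sketch that disables the LED before a capture
        (assuming RPi.GPIO is installed and the process has sufficient
        privileges)::

            import picamera

            with picamera.PiCamera() as camera:
                camera.led = False
                camera.capture('dark.jpg')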
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
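        For example, a sketch that compares the firmware clock with a
        frame timestamp (assuming ``clock_mode='raw'`` so the two share
        a timebase)::

            import picamera

            with picamera.PiCamera(clock_mode='raw') as camera:
                camera.start_recording('video.h264')
                camera.wait_recording(1)
                print(camera.timestamp, camera.frame.timestamp)
                camera.stop_recording()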
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
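        For example, a sketch that reports the current frame's index and
        timestamp a couple of seconds into a recording::

            import picamera

            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                camera.wait_recording(2)
                f = camera.frame
                if f is not None:  # None briefly after start_recording
                    print(f.index, f.timestamp)
                camera.stop_recording()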
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
# MASKED: _configure_camera function (lines 2005-2091)
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
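For example, a minimal sketch of requesting a specific mode at
construction time (mode 4 and the filename are chosen purely for
illustration)::
    import picamera
    with picamera.PiCamera(sensor_mode=4) as camera:
        camera.capture('mode4.jpg')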
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional; adjustments as small as 1/256th
of an fps are possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When ``True``,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
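For example, a minimal sketch (the filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        # calculate statistics from the still frame itself
        camera.still_stats = True
        camera.capture('still.jpg')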
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
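For example, a rough sketch of a long exposure (all values are
illustrative; the framerate is lowered first so that the slow shutter
speed is permitted)::
    from fractions import Fraction
    import picamera
    with picamera.PiCamera(framerate=Fraction(1, 2)) as camera:
        camera.shutter_speed = 2000000   # 2 seconds, in microseconds
        camera.iso = 800
        camera.capture('long_exposure.jpg')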
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
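For example, a minimal sketch (the value and filename are
illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.iso = 100   # low sensitivity for bright conditions
        camera.capture('daylight.jpg')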
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
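For example, a rough sketch of fixing the gains for consistent exposure
(the settling delay, ISO value, and filename are illustrative)::
    import time
    import picamera
    with picamera.PiCamera() as camera:
        camera.iso = 100
        time.sleep(2)   # let the gains settle on sensible values
        camera.exposure_mode = 'off'
        camera.capture('consistent.jpg')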
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
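For example, a minimal sketch of manual white balance (the gain values
and filename are illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.awb_mode = 'off'
        camera.awb_gains = (1.5, 1.3)   # (red, blue)
        camera.capture('manual_awb.jpg')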
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take, are as follows (a brief usage sketch follows the
table):
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
|                    | *x0*, *y0*,    | processed as RGB (0) or YUV(1). Input   |
|                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *u* and *v* default   |
|                    | *r*, *g*, *b*  | to 0.                                   |
| +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *g* also defaults to  |
|                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
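For example, a rough sketch of configuring the solarize effect (the
parameter values and filename are illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.image_effect = 'solarize'
        camera.image_effect_params = (128, 128, 128, 0)   # x0, y0, y1, y2
        camera.capture('solarized.jpg')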
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
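For example, a minimal sketch of a black and white capture (the
filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.color_effects = (128, 128)   # fix U/V for greyscale output
        camera.capture('grey.jpg')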
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except (TypeError, ValueError):
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and
rescaling. If rotation has been applied, the zoom is interpreted as
``(y, x, h, w)`` instead. The values ``w`` and ``h`` can modify the
aspect ratio of the image: use equal values for ``w`` and ``h`` if you
want to preserve the aspect ratio.
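For example, a minimal sketch selecting the central quarter of the
frame (the filename is illustrative)::
    import picamera
    with picamera.PiCamera() as camera:
        camera.zoom = (0.25, 0.25, 0.5, 0.5)
        camera.capture('roi.jpg')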
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
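        For example, to stamp all output with a simple label (an
        illustrative sketch using only the attributes documented here)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.annotate_text = 'Hello world!'
                camera.capture('annotated.jpg')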
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
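        For example, to number the frames of a short recording (an
        illustrative sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.annotate_frame_num = True
                camera.start_recording('numbered.h264')
                camera.wait_recording(10)
                camera.stop_recording()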
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
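        For example (sketch)::
            camera.annotate_text = 'Large label'
            camera.annotate_text_size = 48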
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
                    'deprecated; use picamera.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
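        For example, to draw the annotation text on a solid blue background
        (an illustrative sketch using the :class:`Color` class documented
        elsewhere in this module)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.annotate_background = picamera.Color('blue')
                camera.annotate_text = 'Hello world!'
                camera.capture('annotated.jpg')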
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute are backward compatible although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
    def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
        :attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
        camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
        arguments are required to ensure correct operation on older firmwares
        (specifically that we don't try to set the sensor mode when both the
        old and new modes are 0, i.e. automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
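            # framerate may be a scalar or a (low, high) range; when a range
            # is given the ports' framerate is set to 0 and the FPS_RANGE
            # parameter applied below takes effect instead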
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
            raise
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
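    For example, on a Compute Module with two cameras attached, a
    side-by-side stereoscopic capture might look like this (an illustrative
    sketch)::
        import picamera
        with picamera.PiCamera(stereo_mode='side-by-side') as camera:
            camera.capture('stereo.jpg')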
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
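        For example, to display a slightly transparent preview for ten
        seconds (an illustrative sketch)::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview(alpha=200)
                time.sleep(10)
                camera.stop_preview()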
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
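        For example, to display a 97x57 RGB overlay above the preview (an
        illustrative sketch; any object supporting the buffer protocol,
        padded to 128x64 as described above, would do)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                buf = b'\xff' * (128 * 64 * 3)  # white, padded to 128x64
                overlay = camera.add_overlay(buf, size=(97, 57), format='rgb')
                overlay.layer = 3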
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
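        For example, to record ten seconds of H.264 video to a file (a
        minimal sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                camera.wait_recording(10)
                camera.stop_recording()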
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
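        For example, to split a recording into two consecutive ten-second
        files (an illustrative sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('part1.h264')
                camera.wait_recording(10)
                camera.split_recording('part2.h264')
                camera.wait_recording(10)
                camera.stop_recording()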
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
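        For example, to poll for errors once per second during a thirty
        second recording (an illustrative sketch)::
            camera.start_recording('video.h264')
            for _ in range(30):
                camera.wait_recording(1)  # raises if recording failed
            camera.stop_recording()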
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
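        For example, to capture a full-resolution JPEG to a file, and then a
        reduced-size JPEG to an in-memory stream (an illustrative sketch; the
        filename is arbitrary, and *format* must be given explicitly for the
        stream since it has no name to guess from)::
            import io
            import picamera
            with picamera.PiCamera() as camera:
                camera.capture('still.jpg', quality=90)
                stream = io.BytesIO()
                camera.capture(stream, format='jpeg', resize=(320, 240))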
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
        This method accepts a sequence or iterator of *outputs*, each of which
        must be a string specifying a filename for output, a file-like object
        with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
        |                                            | image2013-10-05 12:07:32.498539.jpg, ...   |       |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
           this reason (and the fact that the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
        You may also specify datetime, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
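        For example (an illustrative sketch; the tag values are arbitrary)::
            import datetime
            camera.exif_tags['IFD0.Artist'] = 'Jane Doe'
            camera.exif_tags['IFD0.DateTime'] = datetime.datetime(2000, 1, 1, 12, 0)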
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
        |       | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip,   |
        |       | StripByteCounts, XResolution, YResolution,                  |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
        |       | JPEGInterchangeFormatLength, YCbCrCoefficients,             |
        |       | YCbCrSubSampling, YCbCrPositioning, ReferenceBlackWhite,    |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
        |       | FlashEnergy, SpatialFrequencyResponse,                      |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
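        For example, to ensure the LED stays off while capturing (assuming
        RPi.GPIO is installed and the process has the necessary privileges)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.led = False
                camera.capture('image.jpg')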
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
            (the GPIOs used to control the camera LED were re-routed to the
            GPIO expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
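        For example, a rough sketch of estimating how far the most recently
        recorded frame lags the camera's clock when :attr:`clock_mode` is
        ``'raw'``::
            import picamera
            with picamera.PiCamera(clock_mode='raw') as camera:
                camera.start_recording('video.h264')
                camera.wait_recording(1)
                frame = camera.frame
                if frame is not None and frame.timestamp is not None:
                    lag = camera.timestamp - frame.timestamp  # microseconds
                camera.stop_recording()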
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
            There is a small window of time after calling
            :meth:`start_recording` during which querying this attribute will
            return ``None``. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
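        For example, to print the index of the frame currently being encoded
        once a second during a recording (a simple sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                for i in range(10):
                    camera.wait_recording(1)
                    if camera.frame is not None:
                        print(camera.frame.index)
                camera.stop_recording()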
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
        camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
        arguments are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
        heuristic used to select one automatically) is detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
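        For example, to start the camera in a specific sensor mode rather
        than letting it select one automatically (an illustrative sketch; see
        :ref:`camera_modes` for the meaning of each mode)::
            import picamera
            camera = picamera.PiCamera(sensor_mode=4)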
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
            Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
            cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
        frame (this also applies when the preview is not visible). When
        ``True``, statistics will be calculated from the captured image itself.
        When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
        The advantage of calculating scene statistics from the captured image
        is that the time between startup and capture is reduced, as only the
        AGC (automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
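        For example (a minimal sketch)::
            import picamera
            with picamera.PiCamera() as camera:
                camera.still_stats = True
                camera.capture('image.jpg')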
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
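        For example, to brighten the picture slightly::
            camera.brightness = 60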
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
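        For example, a sketch of fixing the exposure time at 10ms; the
        framerate is set first so that the frame interval (~33,333µs at
        30fps) can accommodate the requested speed::
            camera.framerate = 30
            camera.shutter_speed = 10000  # microseconds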
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
        When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
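        For example (a minimal sketch, assuming ``'high'`` is among the
        strengths listed above)::
            camera.drc_strength = 'high'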
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
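        For example, a sketch of fixing the sensitivity for consistent
        exposures in bright, stable lighting::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.iso = 100
                time.sleep(2)  # give the gains time to settle
                camera.capture('image.jpg')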
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control; otherwise all
frames captured will appear black.
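As a minimal sketch of that advice (the polling loop is an illustrative
heuristic, not part of the API)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        while camera.analog_gain <= 1:
            time.sleep(0.1)
        camera.exposure_mode = 'off'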
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
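For example, to hold the white balance at fixed gains (a minimal
sketch; the gain values shown are arbitrary starting points)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.awb_mode = 'off'
        camera.awb_gains = (1.5, 1.2)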
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect             | Parameters     | Description                             |
+====================+================+=========================================+
| ``'solarize'``     | *yuv*,         | *yuv* controls whether data is          |
|                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
|                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
|                    |                | linearly onto the range 0 to *y0*.      |
|                    |                | Values from *x0* to 255 are remapped    |
|                    |                | linearly onto the range *y1* to *y2*.   |
|                    +----------------+-----------------------------------------+
|                    | *x0*, *y0*,    | Same as above, but *yuv* defaults to    |
|                    | *y1*, *y2*     | 0 (process as RGB).                     |
|                    +----------------+-----------------------------------------+
|                    | *yuv*          | Same as above, but *x0*, *y0*, *y1*,    |
|                    |                | *y2* default to 128, 128, 128, 0        |
|                    |                | respectively.                           |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'``   | *quadrant*     | *quadrant* specifies which quadrant     |
|                    |                | of the U/V space to retain chroma       |
|                    |                | from: 0=green, 1=red/yellow, 2=blue,    |
|                    |                | 3=purple. There is no default; this     |
|                    |                | effect does nothing until parameters    |
|                    |                | are set.                                |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*,        | *lens* specifies the lens shading       |
|                    | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0       |
|                    | *u*, *v*       | indicates lens shading has no effect).  |
|                    |                | *r*, *g*, *b* are multipliers for their |
|                    |                | respective color channels (0.0 to       |
|                    |                | 256.0). *u* and *v* are offsets added   |
|                    |                | to the U/V plane (0 to 255).            |
|                    +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *u* and *v* default   |
|                    | *r*, *g*, *b*  | to 0.                                   |
|                    +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *g* also defaults     |
|                    | *r*, *b*       | to 1.0.                                 |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'``    | *dir*          | If *dir* is 0, swap RGB to BGR. If      |
|                    |                | *dir* is 1, swap RGB to BRG.            |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'``    | *steps*        | Control the quantization steps for the  |
|                    |                | image. Valid values are 2 to 31, and    |
|                    |                | the default is 4.                       |
+--------------------+----------------+-----------------------------------------+
| ``'blur'``         | *size*         | Specifies the size of the kernel. Valid |
|                    |                | values are 1 or 2.                      |
+--------------------+----------------+-----------------------------------------+
| ``'film'``         | *strength*,    | *strength* specifies the strength of    |
|                    | *u*, *v*       | the effect. *u* and *v* are offsets     |
|                    |                | added to the U/V plane (0 to 255).      |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'``   | *u*, *v*       | *u* and *v* specify offsets to add to   |
|                    |                | the U/V plane (0 to 255).               |
|                    +----------------+-----------------------------------------+
|                    |                | No parameters indicates no U/V effect.  |
+--------------------+----------------+-----------------------------------------+
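For example, to posterise the image with coarser quantization than the
default (a minimal sketch based on the table above)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.image_effect = 'posterise'
        camera.image_effect_params = (16,)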
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
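For example, to capture a black and white image (a minimal sketch;
the filename is arbitrary)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.color_effects = (128, 128)
        camera.capture('bw.jpg')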
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and rescaling.
If a rotation has been applied, the zoom is interpreted as ``(y, x, h, w)``
instead. The values ``w`` and ``h`` can alter the aspect ratio of the
image: use equal values for ``w`` and ``h`` to keep the aspect ratio
unchanged.
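For example, to zoom in on the central quarter of the frame while
preserving the aspect ratio (a minimal sketch)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.zoom = (0.25, 0.25, 0.5, 0.5)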
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
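For example, to stamp output with the current time (a minimal sketch;
the strftime format is an arbitrary choice)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_text = time.strftime('%Y-%m-%d %H:%M:%S')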
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute is backward compatible, although the values themselves
are not.
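For example, to draw the annotation text on a black background for
legibility (a minimal sketch)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.annotate_background = picamera.Color('black')
        camera.annotate_text = 'Recording'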
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
|
all_valid_variations | Returns all leet variations of a triplet which result in
Base32-only charset words on base64 encoding
Args:
word: An english triplet
Returns:
list: of all valid variations | """inter-base steganography
producing base32 and base64 decodable strings"""
from base64 import b64encode, b64decode
import string
from itertools import product
from argparse import ArgumentParser
CHARSET = string.printable.encode()
B32_CHARSET = (string.ascii_uppercase + '234567').encode()
B64_CHARSET = (
string.ascii_lowercase +
string.ascii_uppercase +
string.digits +
'+/').encode()
ASCII_LOWER = string.ascii_lowercase.encode()
WHITESPACE = string.whitespace.encode()
ALPHA_SPACE = (
string.ascii_uppercase +
string.ascii_lowercase +
string.whitespace).encode()
ASCII_SUBS = {"a": ["a", "A", "4", "@"],
"b": ["b", "B", "8", "6"],
"c": ["c", "C", "("],
"d": ["d", "D"],
"e": ["e", "E", "3"],
"f": ["f", "F"],
"g": ["g", "G", "6", "9"],
"h": ["h", "H", "#"],
"i": ["i", "I", "1", "|", "!"],
"j": ["j", "J", "]", ";"],
"k": ["k", "K"],
"l": ["l", "L", "1", "|"],
"m": ["m", "M"],
"n": ["n", "N"],
"o": ["o", "O", "0"],
"p": ["p", "P"],
"q": ["q", "Q", "9"],
"r": ["r", "R", "2"],
"s": ["s", "S", "5", "$"],
"t": ["t", "T", "7", "+"],
"u": ["u", "U"],
"v": ["v", "V"],
"w": ["w", "W"],
"x": ["x", "X"],
"y": ["y", "Y"],
"z": ["z", "Z", "2", "%"],
"0": ["0"],
"1": ["1"],
"2": ["2"],
"3": ["3"],
"4": ["4"],
"5": ["5"],
"6": ["6"],
"7": ["7"],
"8": ["8"],
"9": ["9"],
" ": [" ", "\t", "_"]
}
def all_variations(word: str) -> list:
"""
Produce all leet variations of a string via single-character substitutions
"""
ans = [""]
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [x + y for x in ans for y in leet_letter]
return ans
def variation_gen(word: str):
"""
Produces all leet variations of a string via single-character substitutions
Args:
word: a 3 character string to generate all variations
Returns:
generator: generator for all possible leet variations
"""
return product(*(ASCII_SUBS[i] for i in word))
# MASKED: all_valid_variations function (lines 85-100)
def valid_variation(word: str) -> str:
"""
Generates a single valid variation
Args:
word: the triplet to generate a variation from
Returns:
str: A valid variation of `word` or None otherwise
"""
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
return "".join(variation)
return None
# List to precompute the triplets for which there doesn't exist a valid
# variation
NON_LEET = []
for perm in product(string.ascii_lowercase + ' ' + string.digits, repeat=3):
if not valid_variation(''.join(perm)):
NON_LEET.append(''.join(perm))
def transform(strng: str) -> str:
"""
Transform the string to contain only lowercase letters, digits, and spaces
Converts uppercase to lowercase, strips all other characters except
space, and pads the result with trailing spaces to a multiple of eight
"""
for char in string.punctuation + string.whitespace[1:]:
strng = strng.replace(char, '')
return strng.lower() + ' ' * (8 - len(strng) % 8)
def master_encode(strng: str) -> bytes:
"""
Encodes a string to its leet equivalent (sans punctuation) which when
base64 encoded contains only base32 characters
"""
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while i < len(strng):
try:
current = strng[i:i + 3]
if current in NON_LEET:
if current[:2] + ' ' not in NON_LEET:
result += valid_variation(current[:2] + ' ')
i += 2
elif current[0] + ' ' not in NON_LEET:
result += valid_variation(current[0] + ' ')
i += 1
elif ' {} '.format(current[0]) not in NON_LEET:
result += valid_variation(' {} '.format(current[0]))
i += 1
elif ' {}'.format(current[0]) not in NON_LEET:
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode())
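# A quick usage sketch (illustrative, relying only on names defined above):
# every byte of the encoded output stays inside the Base32 alphabet, so the
# same string survives both base32 and base64 decoding attempts.
#
#     enc = master_encode("attack at dawn")
#     assert all(c in B32_CHARSET for c in enc)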
if __name__ == "__main__":
PARSER = ArgumentParser(description="")
PARSER.add_argument(
'--input',
help='read a single line directly from input',
action="store_true")
PARSER.add_argument(
'--show',
help='shows the transformed input which results in correct encoding',
action="store_true")
PARSER.add_argument(
'--file',
help='reading text from file for conversion',
action="append")
ARGS = PARSER.parse_args()
TEST_STRING = """Steganography is the practice of concealing a file,
message, image, or video within another file, message, image, or video.
The word steganography comes from Greek steganographia, which combines
the words steganos meaning "covered or concealed", and graphia meaning
"writing". The first recorded use of the term was by Johannes Trithemius
in his Steganographia, a treatise on cryptography and steganography,
disguised as a book on magic. Generally, the hidden messages appear to
be (or to be part of) something else: images, articles, shopping lists,
or some other cover text. For example, the hidden message may be in
invisible ink between the visible lines of a private letter. Some
implementations of steganography that lack a shared secret are forms
of security through obscurity, and key-dependent steganographic schemes
adhere to Kerckhoffs's principle."""
if ARGS.file:
with open(ARGS.file[0], 'rb') as inp_file:
TEST_STRING = inp_file.read()
elif ARGS.input:
# honour the --input flag; otherwise keep the built-in sample text
TEST_STRING = input("input the line to encode:\n")
ENCODED_STRING = master_encode(TEST_STRING)
print("ENCODED STRING: {}".format(ENCODED_STRING))
if ARGS.show:
print("Transformed string: {}".format(b64decode(ENCODED_STRING)))
# WTBVICAJV2VSZSBFWHBFY3RJIG4JOSBGTGFHNSBCVXQJYTFMICAJWTBVIDZFVCBJNSB3ZTFS\
# ZCBCYXNFNSBCYSAJTWJPMDJMZSAJTWVOVCBET25UICAJICB3T3JSWSBJVHMJIGYJVW4JIG4JZXZ\
# FIHIJVCNFTGVTNSAJ | def all_valid_variations(word: str) -> list:
"""
Returns all leet variations of a triplet which result in
Base32-only charset words on base64 encoding
Args:
word: An english triplet
Returns:
list: of all valid variations
"""
result = []
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
result.append("".join(variation))
return result | 85 | 100 | """inter-base steganography
producing base32 and base64 decodable strings"""
from base64 import b64encode, b64decode
import string
from itertools import product
from argparse import ArgumentParser
CHARSET = string.printable.encode()
B32_CHARSET = (string.ascii_uppercase + '234567').encode()
B64_CHARSET = (
string.ascii_lowercase +
string.ascii_uppercase +
string.digits +
'+/').encode()
ASCII_LOWER = string.ascii_lowercase.encode()
WHITESPACE = string.whitespace.encode()
ALPHA_SPACE = (
string.ascii_uppercase +
string.ascii_lowercase +
string.whitespace).encode()
ASCII_SUBS = {"a": ["a", "A", "4", "@"],
"b": ["b", "B", "8", "6"],
"c": ["c", "C", "("],
"d": ["d", "D"],
"e": ["e", "E", "3"],
"f": ["f", "F"],
"g": ["g", "G", "6", "9"],
"h": ["h", "H", "#"],
"i": ["i", "I", "1", "|", "!"],
"j": ["j", "J", "]", ";"],
"k": ["k", "K"],
"l": ["l", "L", "1", "|"],
"m": ["m", "M"],
"n": ["n", "N"],
"o": ["o", "O", "0"],
"p": ["p", "P"],
"q": ["q", "Q", "9"],
"r": ["r", "R", "2"],
"s": ["s", "S", "5", "$"],
"t": ["t", "T", "7", "+"],
"u": ["u", "U"],
"v": ["v", "V"],
"w": ["w", "W"],
"x": ["x", "X"],
"y": ["y", "Y"],
"z": ["z", "Z", "2", "%"],
"0": ["0"],
"1": ["1"],
"2": ["2"],
"3": ["3"],
"4": ["4"],
"5": ["5"],
"6": ["6"],
"7": ["7"],
"8": ["8"],
"9": ["9"],
" ": [" ", "\t", "_"]
}
def all_variations(word: str) -> list:
"""
Produce all leet variations of a string via single-character substitutions
"""
ans = [""]
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [x + y for x in ans for y in leet_letter]
return ans
def variation_gen(word: str):
"""
Produces all leet variations of a string via single-character substitutions
Args:
word: a 3 character string to generate all variations
Returns:
generator: generator for all possible leet variations
"""
return product(*(ASCII_SUBS[i] for i in word))
def all_valid_variations(word: str) -> list:
"""
Returns all leet variations of a triplet which result in
Base32-only charset words on base64 encoding
Args:
word: An english triplet
Returns:
list: of all valid variations
"""
result = []
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
result.append("".join(variation))
return result
def valid_variation(word: str) -> str:
"""
Generates a single valid variation
Args:
word: the triplet to generate a variation from
Returns:
str: A valid variation of `word` or None otherwise
"""
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
return "".join(variation)
return None
# List to precompute the triplets for which there doesn't exist a valid
# variation
NON_LEET = []
for perm in product(string.ascii_lowercase + ' ' + string.digits, repeat=3):
if not valid_variation(''.join(perm)):
NON_LEET.append(''.join(perm))
def transform(strng: str) -> str:
"""
Transform the string to contain only lowercase letters, digits, and spaces
Converts uppercase to lowercase, strips all other characters except
space, and pads the result with trailing spaces to a multiple of eight
"""
for char in string.punctuation + string.whitespace[1:]:
strng = strng.replace(char, '')
return strng.lower() + ' ' * (8 - len(strng) % 8)
def master_encode(strng: str) -> bytes:
"""
Encodes a string to its leet equivalent (sans punctuation) which when
base64 encoded contains only base32 characters
"""
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while i < len(strng):
try:
current = strng[i:i + 3]
if current in NON_LEET:
if current[:2] + ' ' not in NON_LEET:
result += valid_variation(current[:2] + ' ')
i += 2
elif current[0] + ' ' not in NON_LEET:
result += valid_variation(current[0] + ' ')
i += 1
elif ' {} '.format(current[0]) not in NON_LEET:
result += valid_variation(' {} '.format(current[0]))
i += 1
elif ' {}'.format(current[0]) not in NON_LEET:
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode())
if __name__ == "__main__":
PARSER = ArgumentParser(description="")
PARSER.add_argument(
'--input',
help='read a single line directly from input',
action="store_true")
PARSER.add_argument(
'--show',
help='shows the transformed input which results in correct encoding',
action="store_true")
PARSER.add_argument(
'--file',
help='reading text from file for conversion',
action="append")
ARGS = PARSER.parse_args()
TEST_STRING = """Steganography is the practice of concealing a file,
message, image, or video within another file, message, image, or video.
The word steganography comes from Greek steganographia, which combines
the words steganos meaning "covered or concealed", and graphia meaning
"writing". The first recorded use of the term was by Johannes Trithemius
in his Steganographia, a treatise on cryptography and steganography,
disguised as a book on magic. Generally, the hidden messages appear to
be (or to be part of) something else: images, articles, shopping lists,
or some other cover text. For example, the hidden message may be in
invisible ink between the visible lines of a private letter. Some
implementations of steganography that lack a shared secret are forms
of security through obscurity, and key-dependent steganographic schemes
adhere to Kerckhoffs's principle."""
if ARGS.file:
with open(ARGS.file[0], 'rb') as inp_file:
TEST_STRING = inp_file.read()
elif ARGS.input:
# honour the --input flag; otherwise keep the built-in sample text
TEST_STRING = input("input the line to encode:\n")
ENCODED_STRING = master_encode(TEST_STRING)
print("ENCODED STRING: {}".format(ENCODED_STRING))
if ARGS.show:
print("Transformed string: {}".format(b64decode(ENCODED_STRING)))
# WTBVICAJV2VSZSBFWHBFY3RJIG4JOSBGTGFHNSBCVXQJYTFMICAJWTBVIDZFVCBJNSB3ZTFS\
# ZCBCYXNFNSBCYSAJTWJPMDJMZSAJTWVOVCBET25UICAJICB3T3JSWSBJVHMJIGYJVW4JIG4JZXZ\
# FIHIJVCNFTGVTNSAJ
|
transform_boolean_operand_to_numeric | Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is. | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
TimestampNTZType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
spark_type_to_pandas_dtype,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
"""Check whether the `operand` is valid for arithmetic operations against numerics."""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return not isinstance(operand, bool) or allow_bool
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType)
)
else:
return False
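# Illustrative behaviour of the check above with plain Python literals
# (no Spark session required):
#
#   is_valid_operand_for_numeric_arithmetic(1) -> True
#   is_valid_operand_for_numeric_arithmetic(True) -> True
#   is_valid_operand_for_numeric_arithmetic(True, allow_bool=False) -> False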
# MASKED: transform_boolean_operand_to_numeric function (lines 83-108)
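# A rough sketch of such a transform (illustrative only, not the masked
# implementation): boolean literals become ints, boolean columns are cast
# to the requested numeric Spark type, and anything else passes through.
#
#   def _bool_to_numeric_sketch(operand, spark_type=None):
#       from pyspark.pandas.base import IndexOpsMixin
#       if isinstance(operand, IndexOpsMixin) and isinstance(
#               operand.spark.data_type, BooleanType):
#           return operand.spark.transform(lambda scol: scol.cast(spark_type))
#       if isinstance(operand, bool):
#           return int(operand)
#       return operand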
def _as_categorical_type(
index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
assert isinstance(dtype, CategoricalDtype)
if dtype.categories is None:
codes, uniques = index_ops.factorize()
return codes._with_new_scol(
codes.spark.column,
field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
)
else:
categories = dtype.categories
if len(categories) == 0:
scol = SF.lit(-1)
else:
kvs = chain(
*[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit(-1))
return index_ops._with_new_scol(
scol.cast(spark_type),
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=False
),
)
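# For example, with dtype = CategoricalDtype(categories=['a', 'b']) the map
# built above is {'a': 0, 'b': 1}; values absent from the categories fall
# back to the code -1 (rendered as NaN on the pandas side).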
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""Cast `index_ops` to BooleanType Spark type, given `dtype`."""
spark_type = BooleanType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(
index_ops.spark.column.cast(spark_type)
)
return index_ops._with_new_scol(
scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
)
def _as_string_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
"""Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column. Note that `null_str` is for non-extension dtypes only.
"""
spark_type = StringType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
casted = index_ops.spark.column.cast(spark_type)
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
)
def _as_other_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
"""
from pyspark.pandas.internal import InternalField
need_pre_process = (
isinstance(dtype, CategoricalDtype)
or isinstance(spark_type, BooleanType)
or isinstance(spark_type, StringType)
)
assert not need_pre_process, "Pre-processing is needed before the type casting."
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(scol, field=InternalField(dtype=dtype))
def _sanitize_list_like(operand: Any) -> None:
"""Raise TypeError if operand is list-like."""
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError("The operation can not be applied to %s." % type(operand).__name__)
def _is_valid_for_logical_operator(right: Any) -> bool:
from pyspark.pandas.base import IndexOpsMixin
return isinstance(right, (int, bool)) or (
isinstance(right, IndexOpsMixin)
and (
isinstance(right.spark.data_type, BooleanType)
or isinstance(right.spark.data_type, IntegralType)
)
)
def _is_boolean_type(right: Any) -> bool:
from pyspark.pandas.base import IndexOpsMixin
return isinstance(right, bool) or (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType)
)
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps, DatetimeNTZOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import (
DecimalOps,
FractionalExtensionOps,
FractionalOps,
IntegralExtensionOps,
IntegralOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, DecimalType):
return object.__new__(DecimalOps)
elif isinstance(spark_type, FractionalType):
if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
return object.__new__(FractionalExtensionOps)
else:
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
if extension_dtypes_available and type(dtype) in [
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
]:
return object.__new__(IntegralExtensionOps)
else:
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
if extension_object_dtypes_available and isinstance(dtype, StringDtype):
return object.__new__(StringExtensionOps)
else:
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, TimestampNTZType):
return object.__new__(DatetimeNTZOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise xor can not be applied to %s." % self.pretty_name)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__and__(right)
def rxor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left ^ right
def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__or__(right)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary - can not be applied to %s." % self.pretty_name)
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("abs() can not be applied to %s." % self.pretty_name)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("> can not be applied to %s." % self.pretty_name)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError(">= can not be applied to %s." % self.pretty_name)
def eq(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__eq__)(left, right)
def ne(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__ne__)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.replace({np.nan: None})
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops.copy()
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
raise TypeError("astype can not be applied to %s." % self.pretty_name) | def transform_boolean_operand_to_numeric(
operand: Any, *, spark_type: Optional[DataType] = None
) -> Any:
"""Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is.
"""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
assert isinstance(spark_type, NumericType), "spark_type must be NumericType"
dtype = spark_type_to_pandas_dtype(
spark_type, use_extension_dtypes=operand._internal.data_fields[0].is_extension_dtype
)
return operand._with_new_scol(
operand.spark.column.cast(spark_type),
field=operand._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type),
)
elif isinstance(operand, bool):
return int(operand)
else:
return operand
| 83 | 108 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
TimestampNTZType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
spark_type_to_pandas_dtype,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
"""Check whether the `operand` is valid for arithmetic operations against numerics."""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return not isinstance(operand, bool) or allow_bool
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType)
)
else:
return False
def transform_boolean_operand_to_numeric(
operand: Any, *, spark_type: Optional[DataType] = None
) -> Any:
"""Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is.
"""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
assert isinstance(spark_type, NumericType), "spark_type must be NumericType"
dtype = spark_type_to_pandas_dtype(
spark_type, use_extension_dtypes=operand._internal.data_fields[0].is_extension_dtype
)
return operand._with_new_scol(
operand.spark.column.cast(spark_type),
field=operand._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type),
)
elif isinstance(operand, bool):
return int(operand)
else:
return operand
def _as_categorical_type(
index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
assert isinstance(dtype, CategoricalDtype)
if dtype.categories is None:
codes, uniques = index_ops.factorize()
return codes._with_new_scol(
codes.spark.column,
field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
)
else:
categories = dtype.categories
if len(categories) == 0:
scol = SF.lit(-1)
else:
kvs = chain(
*[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit(-1))
return index_ops._with_new_scol(
scol.cast(spark_type),
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=False
),
)
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""Cast `index_ops` to BooleanType Spark type, given `dtype`."""
spark_type = BooleanType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(
index_ops.spark.column.cast(spark_type)
)
return index_ops._with_new_scol(
scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
)
def _as_string_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
"""Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column. Note that `null_str` is for non-extension dtypes only.
"""
spark_type = StringType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
casted = index_ops.spark.column.cast(spark_type)
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
)
def _as_other_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
"""
from pyspark.pandas.internal import InternalField
need_pre_process = (
isinstance(dtype, CategoricalDtype)
or isinstance(spark_type, BooleanType)
or isinstance(spark_type, StringType)
)
assert not need_pre_process, "Pre-processing is needed before the type casting."
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(scol, field=InternalField(dtype=dtype))
def _sanitize_list_like(operand: Any) -> None:
"""Raise TypeError if operand is list-like."""
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError("The operation can not be applied to %s." % type(operand).__name__)
def _is_valid_for_logical_operator(right: Any) -> bool:
from pyspark.pandas.base import IndexOpsMixin
return isinstance(right, (int, bool)) or (
isinstance(right, IndexOpsMixin)
and (
isinstance(right.spark.data_type, BooleanType)
or isinstance(right.spark.data_type, IntegralType)
)
)
def _is_boolean_type(right: Any) -> bool:
from pyspark.pandas.base import IndexOpsMixin
return isinstance(right, bool) or (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType)
)
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps, DatetimeNTZOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import (
DecimalOps,
FractionalExtensionOps,
FractionalOps,
IntegralExtensionOps,
IntegralOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, DecimalType):
return object.__new__(DecimalOps)
elif isinstance(spark_type, FractionalType):
if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
return object.__new__(FractionalExtensionOps)
else:
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
if extension_dtypes_available and type(dtype) in [
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
]:
return object.__new__(IntegralExtensionOps)
else:
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
if extension_object_dtypes_available and isinstance(dtype, StringDtype):
return object.__new__(StringExtensionOps)
else:
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, TimestampNTZType):
return object.__new__(DatetimeNTZOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise xor can not be applied to %s." % self.pretty_name)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__and__(right)
def rxor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left ^ right
def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__or__(right)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary - can not be applied to %s." % self.pretty_name)
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("abs() can not be applied to %s." % self.pretty_name)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("> can not be applied to %s." % self.pretty_name)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError(">= can not be applied to %s." % self.pretty_name)
def eq(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__eq__)(left, right)
def ne(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__ne__)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.replace({np.nan: None})
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops.copy()
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
raise TypeError("astype can not be applied to %s." % self.pretty_name)
|
eventFilter | Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
This will emit the resized signal (in this class)
whenever the linked up object is being resized.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter.
|
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from sgtk.platform.qt import QtCore, QtGui
import sgtk
class WorkAreaButton(QtGui.QToolButton):
"""
UX for switching work area.
This displays a "change work area" button which a user can interact with
The button is designed to expand so that it is subtle until a user
hovers over it.
:signal clicked(str, int): Fires when someone clicks the change
work area button. Arguments passed are the entity type and entity id
"""
WIDGET_WIDTH_COLLAPSED = 30
WIDGET_HEIGHT = 30
NON_WORK_AREA_TYPES = [
"PublishedFile",
"Project",
"TankPublishedFile",
"Version",
"Note",
"Group",
"HumanUser",
"ScriptUser",
"ApiUser",
"ClientUser",
"Department",
"Cut",
"CutItem",
"Delivery",
"Playlist",
"Ticket"
]
change_work_area = QtCore.Signal(str, int)
def __init__(self, parent):
"""
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(WorkAreaButton, self).__init__(parent)
# an icon to represent all items which
# aren't the current work area
self._normal_icon = QtGui.QIcon()
self._normal_icon.addPixmap(
QtGui.QPixmap(":/tk_multi_infopanel/pin.png"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off
)
# an icon to represent the current work area
self._current_work_area_icon = QtGui.QIcon()
self._current_work_area_icon.addPixmap(
QtGui.QPixmap(":/tk_multi_infopanel/pin_blue.png"),
QtGui.QIcon.Disabled,
QtGui.QIcon.Off
)
self.setIcon(self._normal_icon)
self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self._bundle = sgtk.platform.current_bundle()
self._entity_type = None
self._entity_id = None
self._is_static = False
self._caption = "Set Work Area"
self._width = 120
self.clicked.connect(self._on_click)
self.setVisible(False)
def set_up(self, entity_type, entity_id):
"""
Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for
"""
self._entity_id = entity_id
self._entity_type = entity_type
if not self._bundle.get_setting("enable_context_switch"):
# context switch button not enabled
return
# figure out if this is the current project
context = self._bundle.context
context_entity = context.task or context.entity or context.project or None
self.setVisible(True)
self.setEnabled(True)
self.setIcon(self._normal_icon)
self._is_static = False
if context_entity and context_entity["type"] == entity_type and context_entity["id"] == entity_id:
# the current work area
self.setPopupMode(QtGui.QToolButton.DelayedPopup)
self.setToolTip(
"This is your current work area.\n"
"The work you do will be associated with this item in Shotgun."
)
# set blue icon
self.setIcon(self._current_work_area_icon)
# disable the button
self.setEnabled(False)
# make sure it doesn't pop on mouseover
self._is_static = True
elif entity_type in self.NON_WORK_AREA_TYPES:
# don't show the ctx selector for some types
self.setToolTip("This cannot be a work area.")
# disable the button
self.setEnabled(False)
# make sure it doesn't pop on mouse over
self._is_static = True
else:
if entity_type == "Task":
self._caption = "Set Work Area"
self.setToolTip("Click to set your work area to the current task.")
else:
self._caption = "Pick Work Area"
self.setToolTip("Click to select a task.")
self._init_default_state()
def _init_default_state(self):
"""
Sets up the default collapsed state of the button
"""
self.setText("")
self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
# tell the style sheet to adjust
self.setProperty("is_expanded", False)
self.style().unpolish(self)
self.style().polish(self)
def _on_click(self):
"""
Executed when the button is clicked
"""
self.change_work_area.emit(self._entity_type, self._entity_id)
def enterEvent(self, evt):
"""
QT Mouse enter event
"""
if not self._is_static:
# not the current work area. so expand the button
self.setText(self._caption)
self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
# tell the style sheet to adjust
self.setProperty("is_expanded", True)
self.style().unpolish(self)
self.style().polish(self)
return super(WorkAreaButton, self).enterEvent(evt)
def leaveEvent(self, evt):
"""
QT Mouse leave event
"""
if not self._is_static:
# collapse button after a delay
QtCore.QTimer.singleShot(300, self._init_default_state)
return super(WorkAreaButton, self).leaveEvent(evt)
class FloatingWorkAreaButton(WorkAreaButton):
"""
UX for switching work area.
This displays a "change work area" button which a user can interact with
The button is designed to expand so that it is subtle until a user
hovers over it.
Derives from :class:`WorkAreaButton` and positions the widget
relative to the bottom-right corner of the parent widget.
:signal clicked(str, int): Fires when someone clicks the change
work area button. Arguments passed are the entity type and entity id
"""
RIGHT_OFFSET = 6
BOTTOM_OFFSET = 6
def __init__(self, parent):
"""
:param right_side_offset: Right hand side offset in pixels
:param bottom_offset: Bottom offset in pixels
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(FloatingWorkAreaButton, self).__init__(parent)
# hook up a listener to the parent window so this widget
# follows along when the parent window changes size
filter = ResizeEventFilter(parent)
filter.resized.connect(self._on_parent_resized)
parent.installEventFilter(filter)
def set_up(self, entity_type, entity_id):
"""
Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for
"""
if entity_type in self.NON_WORK_AREA_TYPES:
# hide the widget
self.setVisible(False)
else:
# base class implementation
super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id)
def __position_widget(self):
"""
Moves the widget to the bottom-right corner of the parent widget.
"""
self.move(
self.parentWidget().width() - self.width() - self.RIGHT_OFFSET,
self.parentWidget().height() - self.height() - self.BOTTOM_OFFSET
)
def _init_default_state(self):
"""
Sets up the default collapsed state of the button
"""
super(FloatingWorkAreaButton, self)._init_default_state()
self.__position_widget()
def enterEvent(self, evt):
"""
QT Mouse enter event
"""
status = super(FloatingWorkAreaButton, self).enterEvent(evt)
if not self._is_static:
self.__position_widget()
return status
def _on_parent_resized(self):
"""
Special slot hooked up to the event filter.
When associated widget is resized this slot is being called.
"""
self.__position_widget()
class ResizeEventFilter(QtCore.QObject):
"""
Utility and helper.
Event filter which emits a resized signal whenever
the monitored widget resizes.
You use it like this:
# create the filter object. Typically it's
# easiest to parent it to the object that is
# being monitored (in this case self.ui.thumbnail)
filter = ResizeEventFilter(self.ui.thumbnail)
# now set up a signal/slot connection so that the
# __on_thumb_resized slot gets called every time
# the widget is resized
filter.resized.connect(self.__on_thumb_resized)
# finally, install the event filter into the QT
# event system
self.ui.thumbnail.installEventFilter(filter)
"""
resized = QtCore.Signal()
# MASKED: eventFilter function (lines 302-321)
|
def eventFilter(self, obj, event):
"""
Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
This will emit the resized signal (in this class)
whenever the linked up object is being resized.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter.
"""
# peek at the message
if event.type() == QtCore.QEvent.Resize:
# re-broadcast any resize events
self.resized.emit()
# pass it on!
return False
| 302 | 321 |
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from sgtk.platform.qt import QtCore, QtGui
import sgtk
class WorkAreaButton(QtGui.QToolButton):
"""
UX for switching work area.
This displays a "change work area" button which a user can interact with
The button is designed to expand so that it is subtle until a user
hovers over it.
:signal clicked(str, int): Fires when someone clicks the change
work area button. Arguments passed are the entity type and entity id
"""
WIDGET_WIDTH_COLLAPSED = 30
WIDGET_HEIGHT = 30
NON_WORK_AREA_TYPES = [
"PublishedFile",
"Project",
"TankPublishedFile",
"Version",
"Note",
"Group",
"HumanUser",
"ScriptUser",
"ApiUser",
"ClientUser",
"Department",
"Cut",
"CutItem",
"Delivery",
"Playlist",
"Ticket"
]
change_work_area = QtCore.Signal(str, int)
def __init__(self, parent):
"""
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(WorkAreaButton, self).__init__(parent)
# an icon to represent all items which
# aren't the current work area
self._normal_icon = QtGui.QIcon()
self._normal_icon.addPixmap(
QtGui.QPixmap(":/tk_multi_infopanel/pin.png"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off
)
# an icon to represent the current work area
self._current_work_area_icon = QtGui.QIcon()
self._current_work_area_icon.addPixmap(
QtGui.QPixmap(":/tk_multi_infopanel/pin_blue.png"),
QtGui.QIcon.Disabled,
QtGui.QIcon.Off
)
self.setIcon(self._normal_icon)
self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self._bundle = sgtk.platform.current_bundle()
self._entity_type = None
self._entity_id = None
self._is_static = False
self._caption = "Set Work Area"
self._width = 120
self.clicked.connect(self._on_click)
self.setVisible(False)
def set_up(self, entity_type, entity_id):
"""
Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for
"""
self._entity_id = entity_id
self._entity_type = entity_type
if not self._bundle.get_setting("enable_context_switch"):
# context switch button not enabled
return
# figure out if this is the current project
context = self._bundle.context
context_entity = context.task or context.entity or context.project or None
self.setVisible(True)
self.setEnabled(True)
self.setIcon(self._normal_icon)
self._is_static = False
if context_entity and context_entity["type"] == entity_type and context_entity["id"] == entity_id:
# the current work area
self.setPopupMode(QtGui.QToolButton.DelayedPopup)
self.setToolTip(
"This is your current work area.\n"
"The work you do will be associated with this item in Shotgun."
)
# set blue icon
self.setIcon(self._current_work_area_icon)
# disable the button
self.setEnabled(False)
# make sure it doesn't pop on mouseover
self._is_static = True
elif entity_type in self.NON_WORK_AREA_TYPES:
# don't show the ctx selector for some types
self.setToolTip("This cannot be a work area.")
# disable the button
self.setEnabled(False)
# make sure it doesn't pop on mouse over
self._is_static = True
else:
if entity_type == "Task":
self._caption = "Set Work Area"
self.setToolTip("Click to set your work area to the current task.")
else:
self._caption = "Pick Work Area"
self.setToolTip("Click to select a task.")
self._init_default_state()
def _init_default_state(self):
"""
Sets up the default collapsed state of the button
"""
self.setText("")
self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
# tell the style sheet to adjust
self.setProperty("is_expanded", False)
self.style().unpolish(self)
self.style().polish(self)
def _on_click(self):
"""
Executed when the button is clicked
"""
self.change_work_area.emit(self._entity_type, self._entity_id)
def enterEvent(self, evt):
"""
QT Mouse enter event
"""
if not self._is_static:
# not the current work area. so expand the button
self.setText(self._caption)
self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
# tell the style sheet to adjust
self.setProperty("is_expanded", True)
self.style().unpolish(self)
self.style().polish(self)
return super(WorkAreaButton, self).enterEvent(evt)
def leaveEvent(self, evt):
"""
QT Mouse leave event
"""
if not self._is_static:
# collapse button after a delay
QtCore.QTimer.singleShot(300, self._init_default_state)
return super(WorkAreaButton, self).leaveEvent(evt)
class FloatingWorkAreaButton(WorkAreaButton):
"""
UX for switching work area.
This displays a "change work area" button which a user can interact with
The button is designed to expand so that it is subtle until a user
hovers over it.
Derives from :class:`WorkAreaButton` and positions the widget
relative to the bottom-right corner of the parent widget.
:signal clicked(str, int): Fires when someone clicks the change
work area button. Arguments passed are the entity type and entity id
"""
RIGHT_OFFSET = 6
BOTTOM_OFFSET = 6
def __init__(self, parent):
"""
:param right_side_offset: Right hand side offset in pixels
:param bottom_offset: Bottom offset in pixels
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(FloatingWorkAreaButton, self).__init__(parent)
# hook up a listener to the parent window so this widget
# follows along when the parent window changes size
filter = ResizeEventFilter(parent)
filter.resized.connect(self._on_parent_resized)
parent.installEventFilter(filter)
def set_up(self, entity_type, entity_id):
"""
Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for
"""
if entity_type in self.NON_WORK_AREA_TYPES:
# hide the widget
self.setVisible(False)
else:
# base class implementation
super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id)
def __position_widget(self):
"""
Moves the widget to the bottom-right corner of the parent widget.
"""
self.move(
self.parentWidget().width() - self.width() - self.RIGHT_OFFSET,
self.parentWidget().height() - self.height() - self.BOTTOM_OFFSET
)
def _init_default_state(self):
"""
Sets up the default collapsed state of the button
"""
super(FloatingWorkAreaButton, self)._init_default_state()
self.__position_widget()
def enterEvent(self, evt):
"""
QT Mouse enter event
"""
status = super(FloatingWorkAreaButton, self).enterEvent(evt)
if not self._is_static:
self.__position_widget()
return status
def _on_parent_resized(self):
"""
Special slot hooked up to the event filter.
When associated widget is resized this slot is being called.
"""
self.__position_widget()
class ResizeEventFilter(QtCore.QObject):
"""
Utility and helper.
Event filter which emits a resized signal whenever
the monitored widget resizes.
You use it like this:
# create the filter object. Typically it's
# easiest to parent it to the object that is
# being monitored (in this case self.ui.thumbnail)
filter = ResizeEventFilter(self.ui.thumbnail)
# now set up a signal/slot connection so that the
# __on_thumb_resized slot gets called every time
# the widget is resized
filter.resized.connect(self.__on_thumb_resized)
# finally, install the event filter into the QT
# event system
self.ui.thumbnail.installEventFilter(filter)
"""
resized = QtCore.Signal()
def eventFilter(self, obj, event):
"""
Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
This will emit the resized signal (in this class)
whenever the linked up object is being resized.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter.
"""
# peek at the message
if event.type() == QtCore.QEvent.Resize:
# re-broadcast any resize events
self.resized.emit()
# pass it on!
return False
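# --- Editor's note: an illustrative wiring of ResizeEventFilter, not part
# --- of the original file. This assumes it runs inside a Toolkit session
# --- where sgtk.platform.qt resolves to a real Qt binding; the dialog and
# --- slot below are made up.
#
#     dialog = QtGui.QDialog()
#     size_filter = ResizeEventFilter(dialog)      # parent it to the dialog
#     size_filter.resized.connect(lambda: print(dialog.size()))
#     dialog.installEventFilter(size_filter)
#     # eventFilter() emits `resized` on every QEvent.Resize and returns
#     # False, so the resize event still reaches the dialog itself.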
|
broadcast_trick | Provide a decorator to wrap common numpy functions with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
Those arrays are not only more efficient locally, but dask serialisation is
aware of the _real_ size of those arrays and thus can send them around
efficiently and schedule accordingly.
Note that those arrays are read-only and numpy will refuse to assign to them,
so this should be safe.
|
from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..utils import funcname
from .blockwise import BlockwiseCreateArray
from .core import Array, normalize_chunks
from .utils import (
meta_from_array,
empty_like_safe,
full_like_safe,
ones_like_safe,
zeros_like_safe,
)
def _parse_wrap_args(func, args, kwargs, shape):
if isinstance(shape, np.ndarray):
shape = shape.tolist()
if not isinstance(shape, (tuple, list)):
shape = (shape,)
name = kwargs.pop("name", None)
chunks = kwargs.pop("chunks", "auto")
dtype = kwargs.pop("dtype", None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
dtype = np.dtype(dtype)
chunks = normalize_chunks(chunks, shape, dtype=dtype)
name = name or funcname(func) + "-" + tokenize(
func, shape, chunks, dtype, args, kwargs
)
return {
"shape": shape,
"dtype": dtype,
"kwargs": kwargs,
"chunks": chunks,
"name": name,
}
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if "shape" not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop("shape")
if isinstance(shape, Array):
raise TypeError(
"Dask array input not supported. "
"Please use tuple, list, or a 1D numpy array instead."
)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(
name,
func,
shape,
chunks,
)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get("shape", x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for i, s in enumerate(list(shapes)):
kw[i]["shape"] = s
vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
"""
Safe implementation of wrap_func_like(): attempts to use func_like() and,
if that raises a TypeError (e.g. because the shape keyword argument is
not accepted), falls back to func().
"""
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs)
@curry
def wrap(wrap_func, func, **kwargs):
func_like = kwargs.pop("func_like", None)
if func_like is None:
f = partial(wrap_func, func, **kwargs)
else:
f = partial(wrap_func, func_like, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also features
optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {"name": func.__name__} + func.__doc__
f.__name__ = "blocked_" + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
if shape == ():
return np.broadcast_to(func(meta, shape=(), *args, **kwargs), shape)
else:
return np.broadcast_to(func(meta, shape=1, *args, **kwargs), shape)
# MASKED: broadcast_trick function (lines 154-181)
ones = w(broadcast_trick(ones_like_safe), dtype="f8")
zeros = w(broadcast_trick(zeros_like_safe), dtype="f8")
empty = w(broadcast_trick(empty_like_safe), dtype="f8")
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = w(broadcast_trick(full_like_safe))
_full_like = w_like(np.full, func_like=np.full_like)
# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472
_full.__doc__ = _full.__doc__.replace(
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
)
def full(shape, fill_value, *args, **kwargs):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full_like(
a=a,
fill_value=fill_value,
*args,
**kwargs,
)
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
|
def broadcast_trick(func):
"""
Provide a decorator to wrap common numpy functions with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
Those arrays are not only more efficient locally, but dask serialisation is
aware of the _real_ size of those arrays and thus can send them around
efficiently and schedule accordingly.
Note that those arrays are read-only and numpy will refuse to assign to them,
so this should be safe.
"""
inner = _broadcast_trick_inner(func)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith("_like_safe"):
inner.__name__ = inner.__name__[:-10]
return inner
| 154 | 181 |
from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..utils import funcname
from .blockwise import BlockwiseCreateArray
from .core import Array, normalize_chunks
from .utils import (
meta_from_array,
empty_like_safe,
full_like_safe,
ones_like_safe,
zeros_like_safe,
)
def _parse_wrap_args(func, args, kwargs, shape):
if isinstance(shape, np.ndarray):
shape = shape.tolist()
if not isinstance(shape, (tuple, list)):
shape = (shape,)
name = kwargs.pop("name", None)
chunks = kwargs.pop("chunks", "auto")
dtype = kwargs.pop("dtype", None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
dtype = np.dtype(dtype)
chunks = normalize_chunks(chunks, shape, dtype=dtype)
name = name or funcname(func) + "-" + tokenize(
func, shape, chunks, dtype, args, kwargs
)
return {
"shape": shape,
"dtype": dtype,
"kwargs": kwargs,
"chunks": chunks,
"name": name,
}
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if "shape" not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop("shape")
if isinstance(shape, Array):
raise TypeError(
"Dask array input not supported. "
"Please use tuple, list, or a 1D numpy array instead."
)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(
name,
func,
shape,
chunks,
)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get("shape", x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for i, s in enumerate(list(shapes)):
kw[i]["shape"] = s
vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
"""
Safe implementation of wrap_func_like(): attempts to use func_like() and,
if that raises a TypeError (e.g. because the shape keyword argument is
not accepted), falls back to func().
"""
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs)
@curry
def wrap(wrap_func, func, **kwargs):
func_like = kwargs.pop("func_like", None)
if func_like is None:
f = partial(wrap_func, func, **kwargs)
else:
f = partial(wrap_func, func_like, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also features
optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {"name": func.__name__} + func.__doc__
f.__name__ = "blocked_" + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
if shape == ():
return np.broadcast_to(func(meta, shape=(), *args, **kwargs), shape)
else:
return np.broadcast_to(func(meta, shape=1, *args, **kwargs), shape)
def broadcast_trick(func):
"""
Provide a decorator to wrap common numpy functions with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
Those arrays are not only more efficient locally, but dask serialisation is
aware of the _real_ size of those arrays and thus can send them around
efficiently and schedule accordingly.
Note that those arrays are read-only and numpy will refuse to assign to them,
so this should be safe.
"""
inner = _broadcast_trick_inner(func)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith("_like_safe"):
inner.__name__ = inner.__name__[:-10]
return inner
ones = w(broadcast_trick(ones_like_safe), dtype="f8")
zeros = w(broadcast_trick(zeros_like_safe), dtype="f8")
empty = w(broadcast_trick(empty_like_safe), dtype="f8")
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = w(broadcast_trick(full_like_safe))
_full_like = w_like(np.full, func_like=np.full_like)
# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472
_full.__doc__ = _full.__doc__.replace(
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
)
def full(shape, fill_value, *args, **kwargs):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full_like(
a=a,
fill_value=fill_value,
*args,
**kwargs,
)
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
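# --- Editor's note: a quick demonstration of the broadcast trick that the
# --- row above builds on, not part of the original module. The numpy lines
# --- run stand-alone; the dask line assumes dask[array] is installed.
#
#     import numpy as np
#     x = np.broadcast_to(1.0, (100, 100, 100))
#     x.base.nbytes          # 8 -- one float64 backs a million elements
#     x.flags.writeable      # False -- numpy refuses in-place assignment
#
#     import dask.array as da
#     y = da.ones((100, 100), chunks=(50, 50))   # each chunk uses the trick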
|
__init__ | Keyword args:
iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).
nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).
portal (str): IP and port number (or `null` if target is not iSCSI).
wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class PortCommon(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'iqn': 'str',
'nqn': 'str',
'portal': 'str',
'wwn': 'str'
}
attribute_map = {
'iqn': 'iqn',
'nqn': 'nqn',
'portal': 'portal',
'wwn': 'wwn'
}
required_args = {
}
# MASKED: __init__ function (lines 49-70)
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PortCommon`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortCommon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
def __init__(
self,
iqn=None, # type: str
nqn=None, # type: str
portal=None, # type: str
wwn=None, # type: str
):
"""
Keyword args:
iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).
nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).
portal (str): IP and port number (or `null` if target is not iSCSI).
wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).
"""
if iqn is not None:
self.iqn = iqn
if nqn is not None:
self.nqn = nqn
if portal is not None:
self.portal = portal
if wwn is not None:
self.wwn = wwn
| 49 | 70 |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class PortCommon(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'iqn': 'str',
'nqn': 'str',
'portal': 'str',
'wwn': 'str'
}
attribute_map = {
'iqn': 'iqn',
'nqn': 'nqn',
'portal': 'portal',
'wwn': 'wwn'
}
required_args = {
}
def __init__(
self,
iqn=None, # type: str
nqn=None, # type: str
portal=None, # type: str
wwn=None, # type: str
):
"""
Keyword args:
iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).
nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).
portal (str): IP and port number (or `null` if target is not iSCSI).
wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).
"""
if iqn is not None:
self.iqn = iqn
if nqn is not None:
self.nqn = nqn
if portal is not None:
self.portal = portal
if wwn is not None:
self.wwn = wwn
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PortCommon`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortCommon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
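# --- Editor's note: a minimal usage sketch for PortCommon above, not part
# --- of the original module. The field values are invented.
#
#     port = PortCommon(wwn="52:4A:93:7A:96:C2:04:00")
#     port.to_dict()                      # {'wwn': '52:4A:93:7A:96:C2:04:00'}
#     port.portal = "198.51.100.7:3260"   # keys in attribute_map are allowed
#     port.bogus = 1                      # raises KeyError via __setattr__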
|
get_primaries | Compute the xy coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide `length` into `div_num` pieces.
If a fractional remainder appears, distribute it neatly using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply error diffusion to reconcile the fractional parts
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where rounding error keeps the last element from getting its +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
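# --- Editor's note (illustrative, not part of the original file):
# --- equal_devision() spreads the rounding remainder with error diffusion,
# --- so the pieces differ by at most 1 and always sum to `length`, e.g.
# ---     equal_devision(10, 3)        # -> [3, 3, 4]
# ---     sum(equal_devision(100, 7))  # -> 100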
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix `mtx` to the image `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
Get the xy values of the horseshoe-shaped outline (spectral locus) for plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameters
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the horseshoe shape
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
# MASKED: get_primaries function (lines 123-144)
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values,
with some sensible normalization applied.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video level can be low, so normalize and maximize
the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if np.sum(rgb > 1.0) > 0:
print("warning: overflow has occurred at xy_to_rgb")
if np.sum(rgb < 0.0) > 0:
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB looked too narrow and slightly odd, so BT.2020 was chosen.
The drawback is that the colors get a bit pale; to be improved when time allows.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the horseshoe shape
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
Split the region into triangles (build a Delaunay triangulation) to
judge whether a point is inside or outside the horseshoe. Once the
triangulation exists, inside/outside can be judged with cross
products (probably). The triangulation can be plotted with the code
below. One note: the third argument of ```plt.triplot``` is a list
of **indices** into the first and second arguments that form the
triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` reports which triangle a given xy
point falls into. A return value of ```-1``` means the point is inside
no triangle, so a mask for the region test can be built from the
values below zero.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float was removed in NumPy 1.24
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video level can be low, so normalize and maximize
the RGB values per pixel.
"""
rgb[rgb == 0] = 1.0 # guard against division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
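# Minimal usage sketch (the file name is an assumption, not part of the
# original tool set): render a small chromaticity diagram image and
# save it with OpenCV. Note the RGB -> BGR flip needed by cv2.imwrite.
# xy_img = get_chromaticity_image(samples=256, bg_color=0.9)
# cv2.imwrite("chromaticity_sketch.png",
#             np.uint8(np.round(xy_img[..., ::-1] * 0xFF)))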
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlaying several shifted rectangles.
The input signal level is limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Build the data for the horseshoe inside/outside test
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video level can be low, so normalize and maximize
the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# From here, convert back to XYZ in order to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare the conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity_image and paste it onto the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Return an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Handy for building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
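# Usage sketch, traced from the code above: for grid_num=2 the result
# has shape (1, 8, 3), R varies fastest, then G, then B -- the row
# order expected by the CUBE 3DLUT format.
# >>> get_3d_grid_cube_format(grid_num=2)[0]
# array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 0.],
#        [0., 0., 1.], [1., 0., 1.], [0., 1., 1.], [1., 1., 1.]])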
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y  # the original computed x and y but never returned them
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
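# Usage sketch (relies on the `return x, y` added above): sample a
# quadratic Bezier curve through three control points.
# t = np.linspace(0, 1, 64)
# bx, by = quadratic_bezier_curve(t, p0=(0.0, 0.0), p1=(0.5, 1.0), p2=(1.0, 0.0))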
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a staircase gradation pattern.
By tuning the arguments, a pattern that changes by exactly one code value per step can also be created.
# Notes
To get a gradation that changes exactly one code value per step,
choose the parameters so that
```step_num = (2 ** bit_depth) + 1```
holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
The pattern that does not step one code value at a time.
Subtract 1 so the last entry does not become 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
The pattern that changes exactly one code value per step.
Drop the last entry, which would otherwise be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # the argument carries an extra +1, so subtract it
# Double-check that the change really is one code value per step
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a.
The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
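# Usage sketch: paste a 100x100 patch onto a 1080p canvas in place.
# pos is (horizontal_st, vertical_st), so this writes rows 30:130 and
# columns 50:150 of the canvas.
# canvas = np.zeros((1080, 1920, 3))
# patch = np.ones((100, 100, 3))
# merge(canvas, patch, pos=(50, 30))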
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
# generate the high-low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# copy vertically and join the even and odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
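# Usage sketch: the output is square with side dot_size * 2 * repeat,
# e.g. dot_pattern(dot_size=2, repeat=3) has shape (12, 12, 3).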
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Create a dot pattern.
Parameters
----------
kind_num : integer
the number of dot sizes to create.
e.g. kind_num=3 creates the 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
the number of sets of the different dot patterns.
e.g. kind_num=3, whole_repeat=2 creates two sets of the
1-dot, 2-dot and 4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
dedicated to the 3840x2160 HDR test pattern; halves width and height.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlaying several shifted rectangles.
The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Create a checkerboard-like tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Create a test pattern for checking wrong YCbCr coefficients.
Honestly it is assembled in a fairly messy way, by mashing together roughly made patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern comes out wide. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's an optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
Each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# Compute the basic parameters (H_NUM/V_NUM were defined twice above; the duplicates are dropped)
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Modeled on ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# normalize over the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Modeled on ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
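# Round-trip sketch: the two shaper functions are inverses on [0, 1]
# (up to floating-point error), so encoding then decoding recovers x.
# x = np.linspace(0, 1, 16)
# linear = shaper_func_log2_to_linear(x)
# np.allclose(shaper_func_linear_to_log2(linear), x)  # -> True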
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV only seems to support 8 bit, hence this homemade version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends to the lower right from pt1.
Note that pt1 is not the center of the line's thickness.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline frame on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to an RGB code value.
The luminance unit is [cd/m2]; the result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to a code value.
The luminance unit is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Get the list of RGB values of color patches whose center is gray and
whose surrounding patches lie on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The obtained data is not yet sorted and needs to be reordered with the
conversion table produced by calc_rad_patch_idx2().
This function performs exactly that conversion.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
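# Usage sketch (values obtained by tracing the code above): for the
# 3x3 ring of a 3x3 grid, the table starts at the middle-right patch
# and walks the ring counter-clockwise, ending at the lower-right corner.
# >>> calc_rad_patch_idx2(outmost_num=3, current_num=3)
# array([5, 2, 1, 0, 3, 6, 7, 8])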
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Get the list of RGB values of color patches whose center is gray and
whose surrounding patches lie on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The obtained data is not yet sorted; it must be reordered using the
conversion table from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get the list of RGB values of color patches whose center is gray and
whose surrounding patches lie on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted** so that the first entry is
the green patch at the upper left of the image and the last entry is
the purple patch at the lower right. To plot the patches, take the data
from the head of the list and lay it out towards the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0 instead of increasing monotonically.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0 instead of increasing monotonically.
The acceleration is double that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0 instead of increasing monotonically.
The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0 instead of increasing monotonically.
The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
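# The 2x/4x/8x variants above only differ in how many times the sine
# warp is re-applied. A hypothetical generalization (not part of the
# original module) could therefore look like this, where `power` warps
# reproduce the 1x/2x/4x/8x variants for power = 0, 1, 2, 3:
# def get_accelerated_x_pow2(sample_num=64, power=0):
#     rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
#     for _ in range(power):
#         rad = np.sin(rad) * 0.5 * np.pi
#     return (np.sin(rad) + 1) / 2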
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point # white point after the XYZ to RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 ColorChecker image.
Height is derived from Width automatically; padding_rate shifts it slightly.
"""
h_patch_num = 6
v_patch_num = 4
# compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
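# Pipeline sketch (assumes tf.oetf and tf.GAMMA24, which are used
# elsewhere in this module): linear ColorChecker values -> OETF ->
# 6x4 patch image.
# rgb_linear = generate_color_checker_rgb_value(
#     color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
# rgb_gamma = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
# cc_img = make_color_checker_image(rgb_gamma, width=1920, padding_rate=0.01)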
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Turned into a function because computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def get_primaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb | 123 | 144 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # the scipy.ndimage.filters namespace is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide `length` into `div_num` parts.
If the division leaves fractional remainders, distribute them neatly using an error-diffusion method.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply error diffusion to reconcile the fractional parts
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where rounding error keeps the last element from getting its +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
Get the xy values of the horseshoe-shaped outline (spectral locus) for plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameters
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the horseshoe shape
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values,
with some sensible normalization applied.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video level can be low, so normalize and maximize
the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if np.sum(rgb > 1.0) > 0:
print("warning: overflow has occurred at xy_to_rgb")
if np.sum(rgb < 0.0) > 0:
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB looked too narrow and slightly odd, so BT.2020 was chosen.
The drawback is that the colors get a bit pale; to be improved when time allows.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the horseshoe shape
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
Split the region into triangles (build a Delaunay triangulation) to
judge whether a point is inside or outside the horseshoe. Once the
triangulation exists, inside/outside can be judged with cross
products (probably). The triangulation can be plotted with the code
below. One note: the third argument of ```plt.triplot``` is a list
of **indices** into the first and second arguments that form the
triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` reports which triangle a given xy
point falls into. A return value of ```-1``` means the point is inside
no triangle, so a mask for the region test can be built from the
values below zero.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float was removed in NumPy 1.24
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video level can be low, so normalize and maximize
the RGB values per pixel.
"""
rgb[rgb == 0] = 1.0 # guard against division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlaying several shifted rectangles.
The input signal level is limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Build the data for the horseshoe inside/outside test
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video level can be low, so normalize and maximize
the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# From here, convert back to XYZ in order to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare the conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity_image and paste it onto the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Return an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Handy for building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y  # the original computed x and y but never returned them
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a staircase gradation pattern.
By tuning the arguments, a pattern that changes by exactly one code value per step can also be created.
# Notes
To get a gradation that changes exactly one code value per step,
choose the parameters so that
```step_num = (2 ** bit_depth) + 1```
holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
The pattern that does not step one code value at a time.
Subtract 1 so the last entry does not become 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
The pattern that changes exactly one code value per step.
Drop the last entry, which would otherwise be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # the argument carries an extra +1, so subtract it
# Double-check that the change really is one code value per step
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a.
The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
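# Illustrative usage sketch (hypothetical values, not in the original source):
# composite a half-transparent white square over a gray background.
def _example_merge_with_alpha():
    bg = np.ones((256, 256, 3)) * 0.5   # sRGB code values
    fg = np.ones((64, 64, 4))
    fg[..., 3] = 0.5                    # alpha channel, also in code values
    return merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(32, 32))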
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
    pixel_num = dot_size * 2 * repeat
    # generate the high/low boolean arrays
    even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
    even_logic = np.dstack((even_logic, even_logic, even_logic))
    odd_logic = np.logical_not(even_logic)
    # apply the color
    color = color.reshape((1, 1, 3))
    even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
    odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # repeat vertically and stack the even/odd blocks
    even_block = np.repeat(even_line, dot_size, axis=0)
    odd_block = np.repeat(odd_line, dot_size, axis=0)
    pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
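# Illustrative usage sketch (added for clarity, not in the original source):
# a 2-pixel red checkerboard repeated 8 times (32x32 pixels in total).
def _example_dot_pattern():
    return dot_pattern(dot_size=2, repeat=8, color=np.array([1.0, 0.0, 0.0]))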
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
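# Illustrative usage sketch (added for clarity, not in the original source):
def _example_make_csf_color_image():
    img = make_csf_color_image(width=640, height=640,
                               lv1=np.array([512, 512, 512], dtype=np.uint16),
                               lv2=np.array([1023, 1023, 1023],
                                            dtype=np.uint16),
                               stripe_num=6)
    return img / 1023  # back to [0, 1] float for preview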
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = 0.01
    # calculate the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto one image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
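# Round-trip sketch (added for clarity, not in the original source): mapping
# linear -> log2 shaper -> linear reproduces the input within the shaper range.
def _example_shaper_round_trip():
    x = np.array([0.01, 0.18, 1.0, 10.0])
    y = shaper_func_linear_to_log2(x, mid_gray=0.18,
                                   min_exposure=-6.5, max_exposure=6.5)
    return shaper_func_log2_to_linear(y, mid_gray=0.18,
                                      min_exposure=-6.5, max_exposure=6.5)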
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
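# Illustrative usage sketch (added for clarity, not in the original source):
def _example_draw_outline():
    img = np.zeros((1080, 1920, 3))
    draw_outline(img, fg_color=(940, 940, 940), outline_width=2)
    return img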
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の2倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # extract only xyY and the whitepoint; the rest is not needed here
    # -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
    rgb_white_point = target_white
    illuminant_XYZ = whitepoint       # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
6x4 の カラーチェッカーの画像を作る。
Height は Width から自動計算される。padding_rate で少し値が変わる。
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
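# End-to-end sketch (added for clarity, not in the original source):
# linear ColorChecker values -> gamma 2.4 code values -> 6x4 patch image.
def _example_make_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb, width=1920, padding_rate=0.01)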
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate start postion for centering.
Parameters
----------
bg_size : touple(int)
(width, height) of the background image.
fg_size : touple(int)
(width, height) of the foreground image.
Returns
-------
touple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
`calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
get_secondaries | Get the chromaticity coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan. | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
    if over_disp:
        # assumed intent: fit the preview window to the image size
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the rounding using error diffusion
    # -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding error leaves the last element short
    # -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final sanity check
    # -------------------------------------------
    if length != sum(ret_array):
        raise ValueError("the output of equal_devision() is abnormal.")
return ret_array
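# Illustrative sketch (added for clarity, not in the original source):
# the remainder is spread across the parts by error diffusion.
def _example_equal_devision():
    return equal_devision(10, 3)  # -> [3, 3, 4]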
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
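# Illustrative sketch (hypothetical matrix, not in the original source):
# a pure channel swap that exchanges R and B.
def _example_do_matrix():
    img = np.zeros((2, 2, 3))
    img[..., 0] = 1.0  # a red image
    swap_rb = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
    return do_matrix(img, swap_rb)  # -> a blue image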
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
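# Usage sketch (added for clarity, not in the original source): the gamut
# comes back closed (the first point repeated at the end) for easy plotting.
def _example_get_primaries():
    gamut, rgb = get_primaries(name='ITU-R BT.2020')
    return gamut.shape, rgb.shape  # -> ((4, 2), (3, 3))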
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
        rgb[rgb < 0] = 0
        rgb[rgb > 1.0] = 1.0
return rgb
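# Usage sketch (assumes the helper get_xyz_to_rgb_matrix() referenced above
# is available at runtime): D65 white should map close to RGB (1, 1, 1).
def _example_xy_to_rgb():
    return xy_to_rgb(np.array([0.3127, 0.3290]), name='ITU-R BT.2020',
                     normalize='maximum')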
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
# MASKED: get_secondaries function (lines 211-239)
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe
    # --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias to smooth out the alpha channel
    # ------------------------------------------------
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # reconstruct the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
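# Usage sketch (added for clarity, not in the original source): render the
# horseshoe at a modest resolution and return it for preview.
def _example_get_chromaticity_image():
    xy_img = get_chromaticity_image(samples=256, antialiasing=True,
                                    bg_color=0.9, xmin=0.0, xmax=0.8,
                                    ymin=0.0, ymax=0.9)
    return xy_img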
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build the data used to judge inside/outside of the gamut triangle
    # --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias to smooth out the alpha channel
    # ------------------------------------------------
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # reconstruct the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, to obtain Y
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare the conversion to log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # fetch the chromaticity image and paste it at z=0
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
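# Illustrative sketch (added for clarity, not in the original source):
# grid_num=2 yields the 8 cube corners with R varying fastest.
def _example_get_3d_grid_cube_format():
    return get_3d_grid_cube_format(grid_num=2)  # shape (1, 8, 3)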
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    """
    Evaluate a quadratic Bezier curve through p0, p1, p2 at parameter t.
    ``samples`` is currently unused.
    """
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth  # renamed from ``max`` to avoid shadowing the builtin
    # set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        temp = height
        height = width
        width = temp
    if (max_val + 1 != step_num):
        """
        The pattern that does not increase by one code value per step.
        Subtract 1 so that the last value does not become 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        The pattern that changes by exactly one code value per step.
        Drop the last sample because it would be 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num was passed with an extra +1, so undo it here
        # verify, just in case, that each step changes by one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")
    # first, build the gradation for a single horizontal line
    # -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to two dimensions using broadcasting
    # ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # conversion to np.uint16 (kept disabled)
    # ------------------------------
    # img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
    pixel_num = dot_size * 2 * repeat
    # generate the high/low boolean arrays
    even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
    even_logic = np.dstack((even_logic, even_logic, even_logic))
    odd_logic = np.logical_not(even_logic)
    # apply the color
    color = color.reshape((1, 1, 3))
    even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
    odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # repeat vertically and stack the even/odd blocks
    even_block = np.repeat(even_line, dot_size, axis=0)
    odd_block = np.repeat(odd_line, dot_size, axis=0)
    pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = 0.01
    # calculate the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto one image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the rate of change goes
    0 → max → 0 (ease-in / ease-out).
    Parameters
    ----------
    sample_num : int
        the number of samples.
    Returns
    -------
    array_like
        accelerated value list
    Examples
    --------
    >>> x0 = np.linspace(0, 1, 8)
    >>> x1 = get_accelerated_x_1x(8)
    >>> print(x0)
    [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
    >>> print(x1)
    [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the rate of change goes
    0 → max → 0 (ease-in / ease-out).
    The acceleration is twice that of `get_accelerated_x_1x`!!
    Parameters
    ----------
    sample_num : int
        the number of samples.
    Returns
    -------
    array_like
        accelerated value list
    Examples
    --------
    >>> x0 = np.linspace(0, 1, 8)
    >>> x2 = get_accelerated_x_2x(8)
    >>> print(x0)
    [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
    >>> print(x2)
    [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the rate of change goes
    0 → max → 0 (ease-in / ease-out).
    The acceleration is four times that of `get_accelerated_x_1x`!!
    Parameters
    ----------
    sample_num : int
        the number of samples.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the rate of change goes
    0 → max → 0 (ease-in / ease-out).
    The acceleration is eight times that of `get_accelerated_x_1x`!!
    Parameters
    ----------
    sample_num : int
        the number of samples.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
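# Usage sketch (added for illustration; `_demo_accelerated_x` is not part
# of the original module): plot the four easing curves against a linear
# ramp to see how the 1x/2x/4x/8x variants steepen around the midpoint.
def _demo_accelerated_x(sample_num=64):
    x = np.linspace(0, 1, sample_num)
    plt.plot(x, x, '--k', label="linear")
    plt.plot(x, get_accelerated_x_1x(sample_num), label="1x")
    plt.plot(x, get_accelerated_x_2x(sample_num), label="2x")
    plt.plot(x, get_accelerated_x_4x(sample_num), label="4x")
    plt.plot(x, get_accelerated_x_8x(sample_num), label="8x")
    plt.legend()
    plt.show()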
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
    [[ 0.17289286 0.08205728 0.05714562]
     [ 0.5680292 0.29250401 0.21951748]
     [ 0.10435534 0.19656108 0.32958666]
     [ 0.1008804 0.14839018 0.05327639]
     [ 0.22303549 0.2169701 0.43166537]
     [ 0.10715338 0.513512 0.41415978]
     [ 0.74639182 0.20020473 0.03081343]
     [ 0.05947812 0.10659045 0.39897686]
     [ 0.5673215 0.08485376 0.11945382]
     [ 0.11177253 0.04285397 0.14166202]
     [ 0.34250836 0.5062777 0.0557734 ]
     [ 0.79262553 0.35803886 0.025485 ]
     [ 0.01864598 0.05139665 0.28886469]
     [ 0.054392 0.29876719 0.07187681]
     [ 0.45628547 0.03075684 0.04092033]
     [ 0.85379178 0.56503558 0.01475575]
     [ 0.53533883 0.09006355 0.3047824 ]
     [-0.03662977 0.24753781 0.39824679]
     [ 0.91177068 0.91497623 0.89427332]
     [ 0.57973934 0.59203191 0.59370647]
     [ 0.35495537 0.36538027 0.36772001]
     [ 0.19009594 0.19180133 0.19316719]
     [ 0.08524707 0.08890587 0.09255774]
     [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Some of the data is not needed here, so extract only xyY and whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 color checker image.
    The height is computed automatically from the width; it varies slightly
    with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
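# Usage sketch (added for illustration; `_demo_color_checker_image` and its
# output filename are not part of the original module): generate the 24
# linear ColorChecker values, apply the gamma-2.4 OETF, and save the image.
def _demo_color_checker_image(fname="./color_checker_demo.png"):
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    img = make_color_checker_image(rgb, width=1920, padding_rate=0.01)
    cv2.imwrite(fname, np.uint8(np.round(img[..., ::-1] * 0xFF)))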
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
        (st_pos_h, st_pos_v)
    Examples
    --------
    >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Return (width, height) of an image. Turned into a helper because
    building the arguments of `calc_st_pos_for_centering()` by hand was
    tedious.
"""
return (img.shape[1], img.shape[0])
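# Usage sketch (added for illustration; `_demo_centering` is not part of
# the original module): center a mid-gray patch on a black canvas using
# calc_st_pos_for_centering() together with merge().
def _demo_centering():
    bg_img = np.zeros((1080, 1920, 3))
    fg_img = np.ones((480, 640, 3)) * 0.5
    st_pos = calc_st_pos_for_centering(
        bg_size=get_size_from_image(bg_img),
        fg_size=get_size_from_image(fg_img))
    merge(bg_img, fg_img, pos=st_pos)
    return bg_img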
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
    print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3)) | 211 | 239 | #!/usr/bin/env python3
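# Usage sketch (added for illustration; `_demo_secondaries` is not part of
# the original module): print the xy coordinates of the BT.709 secondaries.
def _demo_secondaries():
    xy, rgb = get_secondaries(name='ITU-R BT.709')
    print(xy)   # 3x2 array of xy coordinates: magenta, yellow, cyan
    print(rgb)  # the corresponding RGB triplets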
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# グラデーション方向設定
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
# 念のため1階調ずつの変化か確認
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# まずは水平1LINEのグラデーションを作る
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# ブロードキャストを利用して2次元に拡張する
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# np.uint16 にコンバート
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# 基本パラメータ算出
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# 24ループで1枚の画像に24パッチを描画
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の2倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# 今回の処理では必要ないデータもあるので xyY と whitepoint だけ抽出
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint # ColorCheckerのオリジナルデータの白色点
illuminant_RGB = rgb_white_point # XYZ to RGB 変換後の白色点を設定
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
6x4 の カラーチェッカーの画像を作る。
Height は Width から自動計算される。padding_rate で少し値が変わる。
"""
h_patch_num = 6
v_patch_num = 4
# 各種パラメータ計算
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# パッチを並べる
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate start postion for centering.
Parameters
----------
bg_size : touple(int)
(width, height) of the background image.
fg_size : touple(int)
(width, height) of the foreground image.
Returns
-------
touple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
`calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
dot_pattern | dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image. | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float was removed in NumPy 1.24
    # anti-alias so that the alpha channel falls off smoothly
    # ------------------------------------------------
if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)  # np.float was removed in NumPy 1.24
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
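# Illustrative usage sketch (not part of the original tool set): render the
# chromaticity horseshoe at a modest resolution and save it with matplotlib
# (imported above as plt). The file name is arbitrary.
def _demo_chromaticity_image():
    img = get_chromaticity_image(samples=256, xmin=0.0, xmax=0.8,
                                 ymin=0.0, ymax=0.9)
    plt.imshow(img, extent=(0.0, 0.8, 0.0, 0.9))
    plt.savefig('chromaticity_demo.png', bbox_inches='tight')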
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float was removed in NumPy 1.24
    # anti-alias so that the alpha channel falls off smoothly
    # ------------------------------------------------
if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)  # np.float was removed in NumPy 1.24
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # from here, convert back to XYZ once more in order to obtain Y
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare the conversion to a log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # fetch the chromaticity_image and paste it at the z=0 plane
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
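# Illustrative sanity sketch (not part of the original tool set): the first
# channel (R) varies fastest, as the CUBE 3DLUT convention expects. For
# grid_num=2 the rows enumerate the cube corners
# (0,0,0), (1,0,0), (0,1,0), (1,1,0), (0,0,1), (1,0,1), (0,1,1), (1,1,1).
def _check_cube_order(grid_num=2):
    lut = get_3d_grid_cube_format(grid_num=grid_num)
    assert lut.shape == (1, grid_num ** 3, 3)
    assert np.all(lut[0, 0] == [0, 0, 0]) and np.all(lut[0, 1] == [1, 0, 0])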
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    # evaluate B(t) = (1 - t)^2 * P0 + 2 * (1 - t) * t * P1 + t^2 * P2
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
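# Illustrative check (not part of the original tool set): at t=0.5 a
# quadratic Bezier curve evaluates to 0.25*P0 + 0.5*P1 + 0.25*P2.
def _check_bezier_midpoint():
    x, y = quadratic_bezier_curve(0.5, (0.0, 0.0), (1.0, 2.0), (2.0, 0.0))
    assert np.isclose(x, 1.0) and np.isclose(y, 1.0)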
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # set the gradation direction
    # ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
    if (max + 1 != step_num):
        """
        Pattern that does not increase by one code value per step.
        The last entry would become 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        The last entry would become 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # step_num was passed with an extra +1, so undo it
        # verify, just in case, that each step changes by exactly one
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # first, build a single horizontal line of the gradation
    # -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to two dimensions using broadcasting
    # ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
    # ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
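# Illustrative usage sketch (not part of the original tool set): an exact
# one-code-value-per-step 10 bit ramp, as described in the docstring above.
def _demo_exact_step_gradation():
    img = gen_step_gradation(width=2048, height=64, step_num=1025,
                             bit_depth=10, color=(1.0, 1.0, 1.0),
                             direction='h')
    assert len(np.unique(img[0, :, 0])) == 1024  # levels 0..1023
    return img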
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
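# Illustrative usage sketch (not part of the original tool set): composite a
# half-transparent white square onto a gray background. It assumes the alpha
# channel is encoded with the same transfer function as the color channels,
# which is how merge_with_alpha() above decodes it.
def _demo_merge_with_alpha():
    bg = np.ones((256, 256, 3)) * 0.5
    fg = np.ones((64, 64, 4))
    fg[..., 3] = 0.5  # 50% alpha (encoded)
    return merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(32, 32))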
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
    """
    Create a dot pattern.

    Parameters
    ----------
    dot_size : integer
        dot size.
    repeat : integer
        The number of high-low pairs.
    color : array_like
        color value.

    Returns
    -------
    array_like
        dot pattern image.
    """
    # number of pixels horizontally / vertically
    pixel_num = dot_size * 2 * repeat
    # build the high-low logical arrays
    even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
    even_logic = np.dstack((even_logic, even_logic, even_logic))
    odd_logic = np.logical_not(even_logic)
    # colorize
    color = color.reshape((1, 1, 3))
    even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
    odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # replicate vertically and join the even/odd blocks
    even_block = np.repeat(even_line, dot_size, axis=0)
    odd_block = np.repeat(odd_line, dot_size, axis=0)
    pair_block = np.vstack((even_block, odd_block))
    img = np.vstack([pair_block for x in range(repeat)])
    return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = 0.01
    # derive the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto a single image in one 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
    >>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
    ...     mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    array([ 1.58232402e-13  1.00000000e+00])
"""
    # convert into log2 space, offset so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
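# Illustrative round-trip check (not part of the original tool set): within
# the covered exposure range the two shaper functions are inverses of each
# other, and mid gray maps to the middle of the [0, 1] shaper range.
def _check_shaper_round_trip():
    x = np.array([0.01, 0.18, 1.0, 10.0])
    y = shaper_func_linear_to_log2(x, 0.18, -6.5, 6.5)
    assert np.isclose(y[1], 0.5)  # 0.18 -> 0.5
    assert np.allclose(shaper_func_log2_to_linear(y, 0.18, -6.5, 6.5), x)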
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8 bit images, hence this homemade implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
    Notes
    -----
    The thickness parameter takes effect towards the lower right of pt1.
    Note that pt1 is not the center of the line's thickness.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline frame around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert a luminance [cd/m2] into RGB code values.
    The luminance unit is [cd/m2]. The result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert a luminance [cd/m2] into a code value.
    The luminance unit is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
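# Illustrative check (not part of the original tool set): for every ring of
# patches the conversion table has (current_num - 1) * 4 entries, one per
# patch on the ring.
def _check_rad_patch_idx_len(outmost_num=9, current_num=5):
    idx = calc_rad_patch_idx2(outmost_num=outmost_num,
                              current_num=current_num)
    assert len(idx) == (current_num - 1) * 4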
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の2倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
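# Illustrative generalization (not part of the original tool set): the
# 1x/2x/4x/8x variants above apply the same sine warp repeatedly, so the
# whole family can be expressed with one hypothetical helper.
def get_accelerated_x_nx(sample_num=64, n=1):
    """n is the acceleration multiplier: 1, 2, 4, 8, ... (a power of two)."""
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(int(np.log2(n))):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2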
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # this step does not need every field, so extract only the xyY data
    # and the whitepoint
    # -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
6x4 の カラーチェッカーの画像を作る。
Height は Width から自動計算される。padding_rate で少し値が変わる。
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
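# Illustrative end-to-end sketch (not part of the original tool set):
# compute the 24 linear ColorChecker values, apply a display OETF via
# tf.oetf() (used the same way elsewhere in this module) and render the
# chart.
def _demo_color_checker_chart():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb, width=1920, padding_rate=0.01)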
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate start postion for centering.
Parameters
----------
bg_size : touple(int)
(width, height) of the background image.
fg_size : touple(int)
(width, height) of the foreground image.
Returns
-------
touple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
`calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。
"""
return (img.shape[1], img.shape[0])
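# Illustrative sketch (not part of the original tool set): center a
# foreground image on a background using the two helpers above together
# with merge().
def _demo_center_merge():
    bg = np.zeros((1080, 1920, 3))
    fg = np.ones((480, 640, 3))
    st_pos = calc_st_pos_for_centering(get_size_from_image(bg),
                                       get_size_from_image(fg))
    merge(bg, fg, pos=st_pos)
    return bg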
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
    print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img | 770 | 809 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# グラデーション方向設定
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
# 念のため1階調ずつの変化か確認
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# まずは水平1LINEのグラデーションを作る
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# ブロードキャストを利用して2次元に拡張する
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# np.uint16 にコンバート
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# 基本パラメータ算出
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto one image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Make x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Make x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See the docstring of shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV appears to support only 8-bit images, so this is hand-rolled.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends from pt1 toward the lower right.
Note that the thickness is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline (border) on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into achromatic RGB code values.
The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Color patches are laid out with gray at the center and the surrounding
patches on the a*b* plane of CIELAB space, like this:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The raw patch data is not yet sorted, so it has to be converted with the
conversion table returned by calc_rad_patch_idx2().
This function computes exactly that conversion table.
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Obtain a list of RGB values for color patches with gray at the center and
the surrounding patches on the a*b* plane of CIELAB space, like this:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted, so it has to be converted with the
conversion table returned by calc_rad_patch_idx2().
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Obtain a list of RGB values for color patches with gray at the center and
the surrounding patches on the a*b* plane of CIELAB space, like this:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list has already been **sorted** so that the first entry
is the green patch at the upper left of the image and the last entry is
the purple patch at the lower right. To plot the patches, simply take the
values from the head of the list and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Make a 6x4 color checker image.
Height is computed automatically from Width; it shifts slightly with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
# Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Helper because computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
complex_dot_pattern | Make a dot pattern.
Parameters
----------
kind_num : integer
The number of dot-size kinds to create.
For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
The number of sets of the different dot patterns.
For example, kind_num=3 and whole_repeat=2 create two sets of
the 1-dot, 2-dot and 4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
Only for the 3840x2160 HDR test pattern. Halves the width and height.
Returns
-------
array_like
dot pattern image. | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num parts.
If there is a remainder, distribute it evenly using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Reconcile the remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where rounding error keeps the last element from getting +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
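# Usage sketch (illustrative, not part of the original file): the
# error-diffusion rounding keeps every part within 1 of the others and
# the sum exact.
#   equal_devision(10, 3)         # -> [3, 3, 4]
#   sum(equal_devision(1920, 7))  # -> 1920, always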
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
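# Quick sanity check for do_matrix (illustrative sketch): the identity
# matrix must leave the image unchanged; any 3x3 RGB->RGB matrix works alike.
#   eye = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
#   sample = np.random.rand(4, 4, 3)
#   np.allclose(do_matrix(sample, eye), sample)  # -> True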
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe-shaped spectral locus for plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the horseshoe outline
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values,
normalizing them in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
specific : float
the Y (luminance) value used when normalize == 'specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
As-is, the video level can be low, so normalize & maximize
the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
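# Usage sketch (illustrative): with per-pixel maximum normalization the
# D65 white point should come out as roughly equal R, G and B.
#   white = xy_to_rgb(np.array([[0.3127, 0.3290]]), name='ITU-R BT.2020')
#   # -> values close to [1.0, 1.0, 1.0]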
def get_white_point(name):
"""
Compute the white point (CIE 1931 based).
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate an image of the horseshoe region of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB was narrow and looked slightly off, so BT.2020 was chosen
(the code below currently selects ACES CG). A drawback is that the colors
become slightly pale; to be improved when time permits.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the horseshoe outline
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
To judge whether a point lies inside or outside the horseshoe, the region
is split into triangles (a Delaunay triangulation is built). Once the
triangulation exists, inside/outside can be determined with cross products
(probably). The triangulation can be plotted with the code below.
One note: the third argument of ```plt.triplot``` is a list of **indices**
that form triangles from the first and second arguments, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` reports which simplex index each xy
falls into. A return value of ```-1``` means the point is outside the
region, so a region mask can be built from the values below 0.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
As-is, the video level can be low, so normalize & maximize
the RGB values per pixel.
"""
rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
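# Usage sketch (illustrative): render the horseshoe at 512x512 on a light
# gray background; the output is a gamma(1/2.2)-encoded float RGB image.
#   cie_img = get_chromaticity_image(samples=512, bg_color=0.9)
#   # cie_img.shape -> (512, 512, 3)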
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Make a CSF-like pattern by overlapping several offset rectangles.
The input signal level is limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 16 bit.
lv2 : numeric
video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
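# Usage sketch (illustrative): the output is uint16; the default levels are
# 10-bit values scaled by 0x40. RGB is flipped to BGR for OpenCV.
#   csf = get_csf_color_image(width=640, height=480, stripe_num=18)
#   cv2.imwrite("csf.tiff", csf[..., ::-1])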
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Make an xyY figure like the ones in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Build data for inside/outside tests of the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
As-is, the video level can be low, so normalize & maximize
the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# Convert back to XYZ once more, in order to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare the conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity_image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Return an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
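# Quick check (traced from the code above, treat as a sketch): grid_num=2
# yields the 8 cube corners with R changing fastest, shape (1, 8, 3).
#   get_3d_grid_cube_format(grid_num=2)
#   # -> [[[0,0,0], [1,0,0], [0,1,0], [1,1,0],
#   #      [0,0,1], [1,0,1], [0,1,1], [1,1,1]]]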
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y  # evaluated Bezier coordinates (the original never returned them)
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Make a gradation pattern that changes in steps.
Tuning the arguments also allows a pattern that changes by exactly
one code value per step.
# Caution
To get a gradation that changes by exactly one code value per step,
specify the parameters so that
```step_num = (2 ** bit_depth) + 1```
holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max_val = 2 ** bit_depth  # highest code value + 1 (avoids shadowing built-in max)
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max_val + 1 != step_num):
"""
A pattern that does not step by exactly one code value.
The last entry would be 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max_val, step_num)
val_list[-1] -= 1
else:
"""
A pattern that steps by exactly one code value.
The last entry would be 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max_val, step_num)[0:-1]
step_num -= 1  # step_num is passed +1 larger by design, so subtract it here
# Double-check that each step changes by exactly one code value
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a.
The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
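# Usage sketch (illustrative): paste a 100x100 white patch onto a black
# canvas with its upper-left corner at (x=50, y=20); bg is mutated in place.
#   bg = np.zeros((1080, 1920, 3))
#   fg = np.ones((100, 100, 3))
#   merge(bg, fg, pos=(50, 20))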
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Alpha-blend fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf_str : string
transfer function name
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# Number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
# Generate the High-Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# Colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# Replicate vertically and combine the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
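# Usage sketch (illustrative): a 1-dot checkerboard with 4 high-low pairs
# gives an 8x8 image of 0.0/1.0 float values.
#   img = dot_pattern(dot_size=1, repeat=4)
#   # img.shape -> (8, 8, 3)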
# MASKED: complex_dot_pattern function (lines 812-859)
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Make a CSF-like pattern by overlapping several offset rectangles.
The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Make a tiled checkerboard (stripe) pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Make a test pattern for checking YCbCr coefficient errors.
Honestly this is assembled in a fairly messy way; roughly-made patterns are fused together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern is landscape. The following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
This is an optional parameter.
If you want to draw two different ColorCheckers,
set the second set of RGB values here.
size : tuple
canvas size.
block_size : float
The size of each block,
as a ratio to the height of the canvas.
padding : float
Padding around each block, as a ratio to the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = padding
# Compute the basic parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto one image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Make x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Make x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See the docstring of shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
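# Round-trip sketch (illustrative): the two shaper functions are inverses
# on [min_exposure, max_exposure], apart from the clipping of negatives to 0.
#   x = np.linspace(0.0, 1.0, 8)
#   linear = shaper_func_log2_to_linear(x)
#   np.allclose(x, shaper_func_linear_to_log2(linear))  # -> True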
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV appears to support only 8-bit images, so this is hand-rolled.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends from pt1 toward the lower right.
Note that the thickness is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline (border) on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into achromatic RGB code values.
The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
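# Usage sketch (values taken from the examples above; the exact numbers
# depend on the transfer_functions module):
#   convert_luminance_to_code_value(100, tf.GAMMA24)  # -> 1.0
#   convert_luminance_to_code_value(100, tf.ST2084)   # -> about 0.508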
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Color patches are laid out with gray at the center and the surrounding
patches on the a*b* plane of CIELAB space, like this:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The raw patch data is not yet sorted, so it has to be converted with the
conversion table returned by calc_rad_patch_idx2().
This function computes exactly that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
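# Worked example (traced by hand from the code above, treat as a sketch):
# for outmost_num=5, current_num=3 the trimmed 3x3 block holds the flat 5x5
# indices [6, 7, 8, 11, 12, 13, 16, 17, 18]; the reordering starts at the
# right-middle cell and walks around the ring:
#   calc_rad_patch_idx2(outmost_num=5, current_num=3)
#   # -> [13, 8, 7, 6, 11, 16, 17, 18]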
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Obtain a list of RGB values for color patches with gray at the center and
the surrounding patches on the a*b* plane of CIELAB space, like this:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted, so it has to be converted with the
conversion table returned by calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Obtain a list of RGB values for color patches with gray at the center and
the surrounding patches on the a*b* plane of CIELAB space, like this:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list has already been **sorted** so that the first entry
is the green patch at the upper left of the image and the last entry is
the purple patch at the lower right. To plot the patches, simply take the
values from the head of the list and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Make x whose acceleration goes 0→1→0 instead of increasing uniformly.
The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Make a 6x4 color checker image.
Height is computed automatically from Width; it shifts slightly with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
# Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
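# Pipeline sketch (illustrative; the GAMMA2.4 encode is an assumption, pick
# the OETF that matches the target):
#   rgb_linear = generate_color_checker_rgb_value(color_space=BT709_COLOURSPACE)
#   rgb_cv = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
#   cc_img = make_color_checker_image(rgb_cv, width=1920)
#   cv2.imwrite("cc.tiff", np.uint16(np.round(cc_img[..., ::-1] * 0xFFFF)))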
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Helper because computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Make a dot pattern.
Parameters
----------
kind_num : integer
The number of dot-size kinds to create.
For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
The number of sets of the different dot patterns.
For example, kind_num=3 and whole_repeat=2 create two sets of
the 1-dot, 2-dot and 4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
Only for the 3840x2160 HDR test pattern. Halves the width and height.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
    return img | 812 | 859 |
|
make_ycbcr_checker | Create a test pattern for checking YCbCr coefficient errors.
To be honest, this is assembled in a fairly messy way: roughly-made patterns are mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern is landscape-oriented, and the following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
        # assumption: resize the window to the image size; the original call
        # was missing its size arguments entirely
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    If there is a remainder, distribute it nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the fractions using error diffusion
    # -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding errors leave the last entry short
    # -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final sanity check
    # -------------------------------------------
    if length != sum(ret_array):
        raise ValueError("the output of equal_devision() is abnormal.")
return ret_array
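# Illustrative check (not part of the original module): the error-diffusion
# split always sums back to `length`, spreading the remainder evenly.
def _demo_equal_devision():
    assert equal_devision(10, 3) == [3, 3, 4]
    assert sum(equal_devision(1920, 7)) == 1920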
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix `mtx` to every pixel of `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
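# Illustrative check (not part of the original module): the identity matrix
# leaves the image unchanged; any row-major 3x3 matrix is applied per pixel.
def _demo_do_matrix_identity():
    img = np.ones((2, 2, 3)) * np.array([0.2, 0.5, 0.8])
    out = do_matrix(img, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    assert np.allclose(out, img)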
def _get_cmfs_xy():
"""
    Get the xy values of the horseshoe (spectral locus) outline used when
    plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # basic parameters
    # ------------------
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    # calculate the xy values of the spectral locus
    # --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Calculate the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
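# get_xyz_to_rgb_matrix() / get_rgb_to_xyz_matrix() are called below but are
# never defined or imported in this file. A minimal sketch of what they
# plausibly do, judging from how the *_matrix attributes are used elsewhere
# in this module (an assumption, not the author's original code):
def get_xyz_to_rgb_matrix(name='ITU-R BT.2020'):
    # look up the XYZ -> RGB matrix of the named colour space
    return RGB_COLOURSPACES[name].XYZ_to_RGB_matrix


def get_rgb_to_xyz_matrix(name='ITU-R BT.2020'):
    # look up the RGB -> XYZ matrix of the named colour space
    return RGB_COLOURSPACES[name].RGB_to_XYZ_matrix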
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Calculate RGB values from xy values,
    normalizing them in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be low, so normalize and maximize
    the RGB values per pixel when requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
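# Illustrative usage sketch (not part of the original module): D65 white
# should map to (approximately) equal RGB after 'maximum' normalization.
def _demo_xy_to_rgb_white():
    return xy_to_rgb(np.array([0.3127, 0.3290]), name='ITU-R BT.2020')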
def get_white_point(name):
"""
    Calculate the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Calculate the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
#     # default values for the keyword arguments
#     # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
#     # prepare data for plotting
#     # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate an image of the horseshoe region of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting: sRGB was too narrow and looked a little odd, so this was
    set to BT.2020 (ACEScg is actually selected below). A slight color
    desaturation is a drawback; to be improved when time allows.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # calculate the xy values of the spectral locus
    # --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To judge whether a point lies inside or outside the horseshoe, the
    region is divided into triangles (a Delaunay triangulation). Once the
    triangulation exists, inside/outside can be judged per point (probably
    via cross products). The triangulation can be plotted with the code
    below. One note: the third argument of ```plt.triplot``` is a list of
    **indices** into the first two arguments that form the triangles,
    e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` tells which simplex contains each
    xy point. It returns ```-1``` for points outside the region, so a
    "less than 0" test yields the region mask.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias so that the alpha channel becomes smooth
    # ------------------------------------------------
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)
        kernel /= np.sum(kernel)
        mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # reconstruct colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be low, so normalize and maximize
    the RGB values per pixel.
    """
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # turn the background gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
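# Illustrative usage sketch (not part of the original module): render the
# horseshoe and save it with OpenCV (note the RGB -> BGR channel flip).
def _demo_save_chromaticity_image():
    rgb = get_chromaticity_image(samples=512)
    cv2.imwrite("chromaticity.png", np.uint8(np.round(rgb[..., ::-1] * 0xFF)))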
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by stacking multiple shifted rectangles.
    Input signal levels are restricted to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. This must be a 16-bit value
        (e.g. a 10-bit level scaled by 0x40, as in the defaults).
    lv2 : numeric
        video level 2. This must be a 16-bit value
        (e.g. a 10-bit level scaled by 0x40, as in the defaults).
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY figure like the ones in SONY's HDR explanation materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build data for the inside/outside test of the gamut region
    # --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias so that the alpha channel becomes smooth
    # ------------------------------------------------
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)
        kernel /= np.sum(kernel)
        mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # reconstruct colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be low, so normalize and maximize
    the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, to obtain Y
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare for converting to a log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity image and paste it at the z=0 plane
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Return an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Convenient when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
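# Illustrative check (not part of the original module): grid_num=N yields
# N**3 RGB triplets with shape (1, N**3, 3), with R varying fastest.
def _demo_grid_cube_shape():
    lut = get_3d_grid_cube_format(grid_num=4)
    assert lut.shape == (1, 64, 3)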
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    # the original computed x and y but returned nothing; return them
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
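# Usage sketch for the Bezier helper (added; the control points below are
# hypothetical values): the curve must start at p0 and end at p2 for any p1.
def _demo_quadratic_bezier_curve():
    t = np.linspace(0, 1, 5)
    x, y = quadratic_bezier_curve(
        t, p0=(0.0, 0.0), p1=(0.5, 1.0), p2=(1.0, 0.0))
    print(x[0], y[0])    # 0.0 0.0  (== p0)
    print(x[-1], y[-1])  # 1.0 0.0  (== p2)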
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max_val = 2 ** bit_depth  # renamed from `max` to avoid shadowing the built-in
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max_val + 1 != step_num):
"""
Not the one-code-value-per-step pattern.
The last entry would become 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max_val, step_num)
val_list[-1] -= 1
else:
"""
The pattern that changes by exactly one code value per step.
The last entry would become 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max_val, step_num)[0:-1]
step_num -= 1  # step_num comes in with an extra +1, so subtract it
# Double-check that the steps really change by one code value
# (this check only makes sense in this exact one-step branch)
# -----------------------------------------------------------
diff = val_list[1:] - val_list[0:-1]
if not (diff == 1).all():
raise ValueError("calculated value is invalid.")
# First build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to two dimensions using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16 (kept disabled)
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
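# A tiny example of merge() (added): paste a white 16x16 block into a black
# canvas so that its upper-left corner lands at (h, v) = (8, 4).
def _demo_merge():
    bg = np.zeros((64, 64, 3))
    fg = np.ones((16, 16, 3))
    merge(bg, fg, pos=(8, 4))
    print(bg[4, 8], bg[3, 7])  # [1. 1. 1.] [0. 0. 0.]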
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
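# A hedged usage sketch of merge_with_alpha() (added; assumes tf.SRGB and a
# 4-channel foreground whose alpha channel is encoded like the color planes):
def _demo_merge_with_alpha():
    bg = np.ones((32, 32, 3)) * 0.5
    fg = np.ones((16, 16, 4))
    fg[..., 3] = 1.0  # fully opaque white square
    merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(8, 8))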
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# Number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
# Generate the High-Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# Apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# Repeat vertically and stack the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
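# Quick shape check for dot_pattern() (added): each side of the output is
# dot_size * 2 * repeat pixels.
def _demo_dot_pattern():
    img = dot_pattern(dot_size=4, repeat=4)
    print(img.shape)  # (32, 32, 3)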
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlapping several shifted rectangles.
Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Create a tiled checkerboard pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
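# Usage sketch (added): a 10-bit checkerboard; levels stay uint16 so that
# the caller decides how to normalize for preview or file output.
def _demo_make_tile_pattern():
    img = make_tile_pattern(width=64, height=64, h_tile_num=4, v_tile_num=4)
    print(img.dtype, img.shape)  # uint16 (64, 64, 3)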
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Create a test pattern for spotting wrong YCbCr coefficients.
Honestly, this is assembled in a fairly crude way: two roughly-made
patterns are simply fused together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern is landscape. The following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker image.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
an optional parameter.
If you want to draw two different ColorCheckers,
set the second RGB values to this variable.
size : tuple
canvas size.
block_size : float
size of each block.
This value is a ratio to the height of the canvas.
padding : float
padding around each block, as a ratio to the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_PADDING = padding
# Compute the basic parameters
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Based on ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize with the min/max exposure range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Based on ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See the docstring of shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
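# Round-trip sketch (added): the two shaper functions are inverses for
# inputs inside the [min_exposure, max_exposure] range around mid_gray.
def _demo_shaper_round_trip():
    x = np.array([0.01, 0.18, 1.0, 10.0])
    y = shaper_func_linear_to_log2(x)
    x2 = shaper_func_log2_to_linear(y)
    print(np.allclose(x, x2))  # True: all inputs are within +/-6.5 stops of 0.18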
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV's drawing seems to support only 8 bit, hence this home-made version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends from pt1 toward the lower right.
Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline (border) around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into achromatic RGB code values.
The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
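# Example of the conversion table (added): for the 3x3 ring inside a 5x5
# grid the table picks the ring cells of the 5x5 index matrix in plotting
# order (the stated output was computed by hand and is a hedged expectation).
def _demo_calc_rad_patch_idx2():
    print(calc_rad_patch_idx2(outmost_num=5, current_num=3))
    # -> [13  8  7  6 11 16 17 18]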
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values that do not increase at a constant rate: the rate of change accelerates from 0 to max and back to 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values whose rate of change accelerates from 0 to max and back to 0.
The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values whose rate of change accelerates from 0 to max and back to 0.
The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values whose rate of change accelerates from 0 to max and back to 0.
The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
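# Comparison sketch (added): the higher the factor, the more the samples
# cluster near 0 and 1 while the middle of the range is traversed faster.
def _demo_accelerated_x():
    for func in (get_accelerated_x_1x, get_accelerated_x_2x,
                 get_accelerated_x_4x, get_accelerated_x_8x):
        print(func(8))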
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and the whitepoint
# ---------------------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
6x4 の カラーチェッカーの画像を作る。
Height は Width から自動計算される。padding_rate で少し値が変わる。
"""
h_patch_num = 6
v_patch_num = 4
# Compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
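# End-to-end sketch (added): generate_color_checker_rgb_value() returns
# linear RGB, so apply an OETF before building a displayable image.
def _demo_color_checker():
    rgb_linear = generate_color_checker_rgb_value()
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb, width=1920)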
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Wrapped into a function because computing the arguments for `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
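# Combined usage sketch (added): center a foreground image on a background
# using the two helpers above together with merge().
def _demo_centering():
    bg = np.zeros((1080, 1920, 3))
    fg = np.ones((480, 640, 3))
    st_pos = calc_st_pos_for_centering(get_size_from_image(bg),
                                       get_size_from_image(fg))
    merge(bg, fg, st_pos)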
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img | 946 | 988 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# グラデーション方向設定
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
# 念のため1階調ずつの変化か確認
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# まずは水平1LINEのグラデーションを作る
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# ブロードキャストを利用して2次元に拡張する
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# np.uint16 にコンバート
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# 基本パラメータ算出
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# 24ループで1枚の画像に24パッチを描画
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Build the index-conversion table for the radial color patch layout
(gray at the center, patches on the CIELAB a*b* plane around it):
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The patch data produced by _calc_rgb_from_same_lstar_radial_data() is not
yet sorted; this function builds the conversion table used to sort it.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
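# Editor's illustrative sketch (not part of the original tool set): the
# conversion table for a 5x5 layout with a 3x3 inner ring. Each entry is the
# raster-order destination index for the corresponding ring-ordered patch.
def _demo_calc_rad_patch_idx2():
    print(calc_rad_patch_idx2(outmost_num=5, current_num=3))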
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Get the list of RGB values for the radial color patch layout
(gray at the center, patches on the CIELAB a*b* plane around it):
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted; it must be reordered with the
conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get the list of RGB values for the radial color patch layout
(gray at the center, patches on the CIELAB a*b* plane around it):
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted**: the first entry is the green
patch at the upper left of the image and the last entry is the purple
patch at the lower right. To plot the patches, take the values from the
head of the list in order and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24  # note: this fixed value overrides the transfer_function argument
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Twice the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Four times the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Eight times the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
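# Editor's illustrative sketch (not part of the original tool set): all four
# variants start at 0 and end at 1, but approach the endpoints with flatter
# and flatter slopes as the acceleration factor grows.
def _demo_accelerated_x():
    for func in (get_accelerated_x_1x, get_accelerated_x_2x,
                 get_accelerated_x_4x, get_accelerated_x_8x):
        print(np.round(func(8), 3))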
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# extract only xyY and whitepoint; the rest of the data is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 color checker image.
Height is computed automatically from width; it varies slightly with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
# compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Helper that computes the arguments for `calc_st_pos_for_centering()`, since doing so by hand was tedious.
"""
return (img.shape[1], img.shape[0])
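# Editor's illustrative sketch (not part of the original tool set): center a
# foreground image on a background, using get_size_from_image() for the sizes.
def _demo_centering():
    bg_img = np.zeros((1080, 1920, 3))
    fg_img = np.ones((480, 640, 3))
    st_pos = calc_st_pos_for_centering(bg_size=get_size_from_image(bg_img),
                                       fg_size=get_size_from_image(fg_img))
    merge(bg_img, fg_img, pos=st_pos)  # the foreground lands at (640, 300)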
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
get_log10_x_scale | Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06]) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num parts.
Fractional remainders are spread out sensibly using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# reconcile the remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# handle the case where rounding error leaves the last element short by 1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
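# Editor's illustrative sketch (not part of the original tool set): the parts
# returned by equal_devision() always sum exactly to the original length.
def _demo_equal_devision():
    print(equal_devision(10, 3))         # [3, 3, 4]
    print(sum(equal_devision(1920, 7)))  # 1920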
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe-shaped spectral locus used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# compute the xy values of the spectral locus
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values.
Normalization is also applied in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have low video levels, so normalize and maximize
the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: overflow has occurred at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB was too narrow and looked slightly odd, so a wider
gamut is used instead (ACEScg in the current code; BT.2020 is left as a
comment). The downside is that the colors become somewhat pale.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# compute the xy values of the spectral locus
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
To decide whether a point is inside or outside the horseshoe, the region
is split into triangles (a Delaunay triangulation). With the
triangulation in hand, inside/outside can be determined from cross
products (probably). The resulting triangulation can be plotted with the
code below. One note: the third argument of ```plt.triplot``` is a list
of **indices** into the first two arguments that form the triangles,
something like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` returns the index of the region that
contains each xy point, or ```-1``` when the point lies outside, so the
values below 0 can be used to build a mask for the region.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
# smooth the alpha channel with antialiasing
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# invert the mask
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have low video levels, so normalize and maximize
the RGB values per pixel.
"""
rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
# apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
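# Editor's illustrative sketch (not part of the original tool set): render the
# horseshoe image and save it with OpenCV. The result of
# get_chromaticity_image() is already gamma-corrected (x ** (1/2.2)).
def _demo_chromaticity_image():
    rgb = get_chromaticity_image(samples=256)
    img = np.uint8(np.round(np.clip(rgb, 0.0, 1.0) * 0xFF))
    cv2.imwrite("chromaticity_demo.png", img[..., ::-1])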
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlaying several offset rectangles.
Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 16 bit.
lv2 : numeric
video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY figure like the ones in SONY's HDR explanatory material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# build the data used to test whether points lie inside the gamut triangle
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
# smooth the alpha channel with antialiasing
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# invert the mask
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have low video levels, so normalize and maximize
the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# convert back to XYZ once more, to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# prepare for conversion to log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# get the chromaticity image and paste it at z = 0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Returns an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Convenient when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
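# Editor's illustrative sketch (not part of the original tool set):
# grid_num=2 yields the 8 corners of the RGB cube, with R varying fastest,
# then G, then B -- the order used by CUBE-format 3DLUTs.
def _demo_3d_grid_cube():
    print(get_3d_grid_cube_format(grid_num=2))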
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y  # note: the original computed x and y but never returned them
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a step gradation pattern.
With the right arguments it can also produce a pattern that increases by
exactly one code value per step.
# Caution
To get a gradation that changes by exactly one code value per step,
set the parameters so that
```step_num = (2 ** bit_depth) + 1```
holds. See the concrete example below.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
Pattern that does not increase by exactly one code value per step.
Subtract 1 because the last value would otherwise be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
Pattern that changes by exactly one code value per step.
Drop the last value because it would be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1  # step_num is passed one larger than needed, so decrement it
# double-check that the steps really change by one code value each
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# first build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
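# Editor's illustrative sketch (not part of the original tool set): an exact
# one-code-value-per-step 10-bit ramp needs step_num = (2 ** 10) + 1 = 1025,
# as the docstring above explains.
def _demo_step_gradation():
    img = gen_step_gradation(width=1024, height=64, step_num=1025,
                             bit_depth=10, color=(1.0, 1.0, 1.0),
                             direction='h')
    print(img.shape)  # (64, 1024, 3)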
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a, overwriting the data in img_a in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
# build the High-Low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
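# Editor's illustrative sketch (not part of the original tool set): 4-pixel
# dots repeated 4 times give a 32x32x3 checker of the given color and black.
def _demo_dot_pattern():
    img = dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0]))
    print(img.shape)  # (32, 32, 3)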
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Create a dot pattern with several dot sizes.
Parameters
----------
kind_num : integer
The number of dot sizes to create.
For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
The number of groups of the different dot patterns.
For example, kind_num=3 and whole_repeat=2 lay out the 1-dot, 2-dot
and 4-dot patterns twice horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
For the 3840x2160 HDR test pattern only. Halves width and height.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlaying several offset rectangles.
Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Create a checkerboard-like tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Create a test pattern for checking YCbCr coefficient errors.
Honestly the construction is quite messy: roughly made patterns are
simply fused together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The resulting pattern is wide. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It is an optional parameter.
If you want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
Each block's size.
This value is a ratio relative to the canvas height.
padding : float
Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# compute basic parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# draw the 24 patches onto a single image
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
# MASKED: get_log10_x_scale function (lines 1070-1086)
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
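# Editor's illustrative sketch (not part of the original tool set): the
# samples are uniformly spaced in log2, so the ratio between neighbours is
# the constant 2 ** ((max_exposure - min_exposure) / (sample_num - 1)).
def _demo_log2_x_scale():
    x = get_log2_x_scale(sample_num=10, ref_val=1.0,
                         min_exposure=-4.0, max_exposure=4.0)
    print(x[1:] / x[:-1])  # constant ratio 2 ** (8 / 9)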
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV appears to support only 8-bit images, so this is a custom implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends from pt1 toward the lower right.
Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline along the edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to achromatic RGB code values.
The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Build the index-conversion table for the radial color patch layout
(gray at the center, patches on the CIELAB a*b* plane around it):
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The patch data produced by _calc_rgb_from_same_lstar_radial_data() is not
yet sorted; this function builds the conversion table used to sort it.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Get the list of RGB values for the radial color patch layout
(gray at the center, patches on the CIELAB a*b* plane around it):
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted; it must be reordered with the
conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get the list of RGB values for the radial color patch layout
(gray at the center, patches on the CIELAB a*b* plane around it):
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted**: the first entry is the green
patch at the upper left of the image and the last entry is the purple
patch at the lower right. To plot the patches, take the values from the
head of the list in order and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24  # note: this fixed value overrides the transfer_function argument
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Twice the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Four times the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values that are not uniformly increasing: the rate of change
starts at 0, peaks in the middle, and returns to 0.
Eight times the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# extract only xyY and whitepoint; the rest of the data is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 color checker image.
Height is computed automatically from width; it varies slightly with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
# compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
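# Editor's illustrative sketch (not part of the original tool set): build a
# BT.709 color checker image. Gamma 2.4 is applied here as a simple display
# encoding; substitute whatever OETF the target pipeline needs.
def _demo_color_checker_image():
    rgb = generate_color_checker_rgb_value(color_space=BT709_COLOURSPACE)
    rgb = tf.oetf(np.clip(rgb, 0.0, 1.0), tf.GAMMA24)
    img = make_color_checker_image(rgb, width=1920, padding_rate=0.01)
    cv2.imwrite("color_checker_demo.png",
                np.uint8(np.round(img * 0xFF))[..., ::-1])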
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Helper that computes the arguments for `calc_st_pos_for_centering()`, since doing so by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x | 1,070 | 1,086 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num parts.
Fractional remainders are spread out sensibly using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# reconcile the remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# handle the case where rounding error leaves the last element short by 1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
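# Editor's illustrative sketch (not part of the original tool set): applying
# the identity matrix leaves the image unchanged; any 3x3 matrix such as an
# RGB-to-RGB conversion can be passed as mtx.
def _demo_do_matrix():
    img = np.random.rand(4, 4, 3)
    identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    print(np.allclose(do_matrix(img, identity), img))  # True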
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe-shaped spectral locus used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# compute the xy values of the spectral locus
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values.
Normalization is also applied in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have low video levels, so normalize and maximize
the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: overflow has occurred at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate an image of the horseshoe region of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was too narrow and looked a bit odd, so a wider
    gamut is used (ACEScg below). A drawback is that the colors become
    slightly desaturated. To be improved when time permits.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Calculate the xy values of the spectral locus
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To determine whether a point lies inside or outside the horseshoe,
    split the region into triangles (build a Delaunay triangulation).
    With the triangulation, inside/outside can then be decided via
    cross-product tests. The triangulation can be plotted with the code
    below. One note: the third argument of ```plt.triplot``` is a list of
    **indices** into the first and second arguments that form the
    triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` returns the simplex index that
    contains each xy point. A return value of ```-1``` means the point is
    outside, so a mask can be built from the values below 0.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    Raw values can have low video levels, so normalize & maximize
    the RGB values per pixel.
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Fill the background with gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
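# Minimal usage sketch: render the horseshoe image and display it with
# preview_image() from this module.
# >>> img = get_chromaticity_image(samples=256, xmax=0.8, ymax=0.9)
# >>> preview_image(img, 'rgb')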
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlaying multiple offset rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
    stripe_num : numeric
        number of the stripe.
    Returns
    -------
    array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
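# Minimal usage sketch: the 16-bit default levels above are 10-bit values
# scaled by 0x40 (64), i.e. 1023 * 0x40 and 512 * 0x40.
# >>> img = get_csf_color_image(width=640, height=480, stripe_num=18)
# >>> img.dtype
# dtype('uint16')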
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like those in Sony's HDR explanation materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data for determining the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    Raw values can have low video levels, so normalize & maximize
    the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # Convert back to XYZ to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at z=0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Useful when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
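# Minimal usage sketch: grid_num=2 yields the 8 cube corners in CUBE order
# (R varies fastest, then G, then B); the result has shape (1, 8, 3).
# >>> lut = get_3d_grid_cube_format(grid_num=2)
# >>> lut[0, :4]
# array([[0., 0., 0.],
#        [1., 0., 0.],
#        [0., 1., 0.],
#        [1., 1., 0.]])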
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    """
    Evaluate a quadratic Bezier curve through p0, p1, p2 at parameter t.
    """
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a step gradation pattern.
    By adjusting the arguments, a pattern that changes by exactly one
    code value per step can also be created.
    # Notes
    To create a gradation that changes exactly one code value per step,
    specify the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth
    # Set the gradation direction
    # ---------------------------
    if direction == 'h':
        pass
    else:
        temp = height
        height = width
        width = temp
    if (max_val + 1 != step_num):
        """
        Not a one-code-value-per-step pattern.
        Subtract 1 so that the last value does not become 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes exactly one code value per step.
        Drop the last value because it would be 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num was passed with an extra +1, so subtract it
        # Verify, just in case, that each step changes by one code value
        # --------------------------------------------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")
    # First, build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
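# Minimal usage sketch: a 10-bit ramp that advances exactly one code value
# per step, following the docstring's rule step_num = 2 ** bit_depth + 1.
# >>> img = gen_step_gradation(width=1024, height=128, step_num=1025,
# ...                          bit_depth=10, direction='h')
# >>> img.shape
# (128, 1024, 3)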
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
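# Minimal usage sketch: paste a 2x2 patch into a 4x4 canvas at
# (h, v) = (1, 1); merge() modifies the background image in place.
# >>> canvas = np.zeros((4, 4, 3))
# >>> merge(canvas, np.ones((2, 2, 3)), pos=(1, 1))
# >>> bool(canvas[1:3, 1:3].all())
# True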
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # Number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # Generate boolean arrays for the high-low pattern
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
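# Minimal usage sketch: dot_size=1, repeat=2 gives a 4x4 single-pixel
# checkerboard.
# >>> img = dot_pattern(dot_size=1, repeat=2)
# >>> img.shape
# (4, 4, 3)
# >>> img[0, :, 0]
# array([1., 0., 1., 0.])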
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a composite dot pattern.
Parameters
----------
    kind_num : integer
        number of dot sizes to create.
        For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of sets of the different dot patterns.
        For example, kind_num=3 and whole_repeat=2 create two sets of the
        1-dot, 2-dot and 4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Create a CSF-like pattern by overlaying multiple offset rectangles.
    Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a checkerboard-like tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for checking YCbCr coefficient mismatches.
    Honestly, this is assembled rather crudely: roughly-made patterns
    are mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is landscape. The following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
    rgb2 : array_like
        An optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
size : tuple
canvas size.
    block_size : float
        Each block's size, as a ratio to the height of the canvas.
    padding : float
        Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
    # Calculate basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto one image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # Convert to log2 space, shifted so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # Normalize using the min/max exposure range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
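# Round-trip sketch: the two shaper functions are inverses of each other
# inside the [min_exposure, max_exposure] range.
# >>> x = np.array([0.25, 0.5, 0.75])
# >>> lin = shaper_func_log2_to_linear(x)
# >>> bool(np.allclose(shaper_func_linear_to_log2(lin), x))
# True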
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8 bit, so this is
    a custom implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 toward the lower right.
    Note that it is not a thickness centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline (border) on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to RGB code values.
    The luminance is given in [cd/m2] and the result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
    The luminance is given in [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Get the list of RGB values for color patches with gray at the center
    and the surrounding patches on the a*b* plane of CIELAB space:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The obtained data is not yet sorted and needs a conversion using the
    table produced by calc_rad_patch_idx2(). This function builds exactly
    that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
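# Minimal usage sketch: for a 3x3 grid the table walks the outer ring of
# 8 cells, starting at the middle-right cell and going counterclockwise.
# >>> calc_rad_patch_idx2(outmost_num=3, current_num=3)
# array([5, 2, 1, 0, 3, 6, 7, 8])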
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get the list of RGB values for color patches with gray at the center
    and the surrounding patches on the a*b* plane of CIELAB space:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The obtained data is not yet sorted; a conversion using the table
    from calc_rad_patch_idx2() is required.
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get the list of RGB values for color patches with gray at the center
    and the surrounding patches on the a*b* plane of CIELAB space:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry is
    the green patch at the upper left of the image and the last entry is
    the purple patch at the lower right. To plot the patches, simply take
    values from the head of the list and lay them out toward the lower
    right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values whose spacing is not uniform: the acceleration goes 0→1→0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values whose spacing is not uniform: the acceleration goes 0→1→0.
    The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values whose spacing is not uniform: the acceleration goes 0→1→0.
    The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values whose spacing is not uniform: the acceleration goes 0→1→0.
    The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
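# Sketch: every variant starts at 0 and ends at 1; only the pacing in
# between differs (steeper in the middle as the factor grows).
# >>> fns = (get_accelerated_x_1x, get_accelerated_x_2x,
# ...        get_accelerated_x_4x, get_accelerated_x_8x)
# >>> all(np.isclose(f(8)[0], 0.0) and np.isclose(f(8)[-1], 1.0) for f in fns)
# True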
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Some of the data is not needed here, so extract only xyY and whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is computed from the width; it varies slightly with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
    # Calculate the parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
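# Minimal usage sketch: build a gamma-2.4 ColorChecker image from the linear
# values produced by generate_color_checker_rgb_value() above (assumes the
# tf.oetf()/tf.GAMMA24 helpers used elsewhere in this module).
# >>> rgb_linear = np.clip(generate_color_checker_rgb_value(), 0.0, 1.0)
# >>> rgb = tf.oetf(rgb_linear, tf.GAMMA24)
# >>> img = make_color_checker_image(rgb, width=1920, padding_rate=0.01)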
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Helper, since computing the arguments for `calc_st_pos_for_centering()`
    by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
get_log2_x_scale | Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]]) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])  # assumed window size
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide length into div_num parts.
    Any remainder is distributed nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the remainder using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error keeps the final element from being incremented
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final check
# -------------------------------------------
if length != sum(ret_array):
        raise ValueError("the output of equal_devision() is abnormal.")
return ret_array
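# Minimal usage sketch: the parts always sum back to the original length.
# >>> equal_devision(10, 3)
# [3, 3, 4]
# >>> sum(equal_devision(1920, 7)) == 1920
# True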
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
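# Minimal usage sketch: an identity matrix leaves the image unchanged.
# >>> img = np.ones((2, 2, 3)) * 0.5
# >>> mtx = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
# >>> bool(np.allclose(do_matrix(img, mtx), img))
# True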
def _get_cmfs_xy():
"""
    Get the xy values of the horseshoe outline (spectral locus) for
    plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
    # ----------------------
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    # Calculate the xy values of the spectral locus (horseshoe shape)
    # ---------------------------------------------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# グラデーション方向設定
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
# 念のため1階調ずつの変化か確認
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# まずは水平1LINEのグラデーションを作る
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# ブロードキャストを利用して2次元に拡張する
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# np.uint16 にコンバート
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
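# Usage sketch (illustrative addition): the pattern is emitted as 10-bit
# code values in a uint16 array, so dividing by 1023 yields a [0, 1] float
# image suitable for preview_image().
def _demo_make_csf_color_image():
    img = make_csf_color_image(width=640, height=640, stripe_num=6)
    assert img.dtype == np.uint16 and img.shape == (640, 640, 3)
    normalized = img / 1023  # 10-bit code values -> [0, 1]
    return normalized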
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a checkerboard-like tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
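# Usage sketch (illustrative addition): with the defaults the top-left tile
# is high_level and its right-hand neighbour low_level.
def _demo_make_tile_pattern():
    img = make_tile_pattern(width=480, height=960, h_tile_num=4,
                            v_tile_num=4)
    assert img.shape == (960, 480, 3)
    assert np.all(img[0, 0] == 1023) and np.all(img[0, 480 // 4] == 940)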
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for checking YCbCr coefficient errors.
    Honestly, it is assembled rather crudely: patterns that were made
    roughly are mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is wider than it is tall. The following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
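# Usage sketch (illustrative addition) confirming the relation from the
# Note above: width = height * 2.
def _demo_make_ycbcr_checker():
    img = make_ycbcr_checker(height=480, v_tile_num=4)
    assert img.shape == (480, 960, 3)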
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        An optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
size : tuple
canvas size.
block_size : float
        Each block's size, given as a ratio of the canvas height.
padding : float
        Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
Examples
--------
    >>> get_log10_x_scale(
    ...     sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
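# Usage sketch (illustrative addition): the endpoints of the returned scale
# are ref_val * 10 ** min_exposure and ref_val * 10 ** max_exposure.
def _demo_get_log10_x_scale():
    x = get_log10_x_scale(sample_num=8, ref_val=1.0,
                          min_exposure=-1, max_exposure=6)
    assert np.allclose(x[0], 1.0e-1) and np.allclose(x[-1], 1.0e6)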
def get_log2_x_scale(
        sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
    """
    Create x-axis data on a log2 scale.
    Examples
    --------
    >>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
    array([[ 0.0625      0.11573434  0.214311    0.39685026  0.73486725
             1.36079     2.5198421   4.66611616  8.64047791 16.        ]])
    """
    x_min = np.log2(ref_val * (2 ** min_exposure))
    x_max = np.log2(ref_val * (2 ** max_exposure))
    x = np.linspace(x_min, x_max, sample_num)
    return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Created with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space; offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
    y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
    y_normalized = np.maximum(y_normalized, 0.0)
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Created with reference to ACESutil.Log2_to_Lin_param.ctl:
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See the description of shaper_func_linear_to_log2() for notes on the
    log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
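# Round-trip sketch (illustrative addition): shaper_func_log2_to_linear()
# inverts shaper_func_linear_to_log2() for values inside the exposure
# range, so chaining the two reproduces the input up to float error.
def _demo_shaper_round_trip():
    y = np.array([0.1, 0.5, 1.0])
    x = shaper_func_log2_to_linear(y)
    assert np.allclose(shaper_func_linear_to_log2(x), y)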
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV appears to support 8-bit images only, so
    this is implemented from scratch.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 towards the lower right.
    Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
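# Usage sketch (illustrative addition): a horizontal line along the top
# edge; per the Notes above, the 4-pixel thickness extends downwards
# from pt1.
def _demo_draw_straight_line():
    img = np.zeros((1080, 1920, 3))
    draw_straight_line(img, (0, 0), (1920, 0), (940, 940, 940), 4)
    assert np.all(img[0:4] == 940) and np.all(img[4] == 0)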
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline around the edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
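# Usage sketch (illustrative addition): after draw_outline() all four
# border rows/columns carry fg_color.
def _demo_draw_outline():
    img = np.zeros((1080, 1920, 3))
    draw_outline(img, (940, 940, 940), 2)
    assert np.all(img[0] == 940) and np.all(img[-1] == 940)
    assert np.all(img[:, 0] == 940) and np.all(img[:, -1] == 940)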
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into achromatic RGB code values.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into a code value.
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Build the index conversion table for color patches that sit on the
    CIELAB a*b* plane around a gray center, like below.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The patch data is generated unsorted and must be rearranged with the
    conversion table returned by calc_rad_patch_idx2(); this function
    computes exactly that table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
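# Sketch (illustrative addition): each ring of the radial layout holds
# 4 * (current_num - 1) patches, so the conversion table has exactly that
# many destination indices.
def _demo_calc_rad_patch_idx2():
    conv_idx = calc_rad_patch_idx2(outmost_num=5, current_num=3)
    assert len(conv_idx) == 4 * (3 - 1)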
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get a list of RGB values for color patches on the CIELAB a*b* plane
    around a gray center, like below.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet sorted; it has to be rearranged with the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get a list of RGB values for color patches on the CIELAB a*b* plane
    around a gray center, like below.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that its first entry is
    the greenish patch at the upper left of the image and its last entry is
    the purplish patch at the lower right. To plot the patches, just take
    the values from the head of the list and lay them out towards the
    lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create an x whose acceleration goes 0→1→0 rather than increasing
    uniformly.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create an x whose acceleration goes 0→1→0 rather than increasing
    uniformly. The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create an x whose acceleration goes 0→1→0 rather than increasing
    uniformly. The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create an x whose acceleration goes 0→1→0 rather than increasing
    uniformly. The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
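# Comparison sketch (illustrative addition): every member of the family
# runs from 0 to 1 and is monotonically non-decreasing; only the
# acceleration profile differs.
def _demo_accelerated_x_family():
    for func in (get_accelerated_x_1x, get_accelerated_x_2x,
                 get_accelerated_x_4x, get_accelerated_x_8x):
        x = func(sample_num=64)
        assert np.allclose(x[0], 0.0) and np.allclose(x[-1], 1.0)
        assert np.all(np.diff(x) >= 0)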
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
    rgb_white_point = target_white
    illuminant_XYZ = whitepoint       # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point used after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    Height is computed automatically from Width; padding_rate changes the
    value slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
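# Usage sketch (illustrative addition): generate linear ColorChecker
# values, clip out-of-gamut entries, apply an OETF and lay out the patches.
def _demo_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value()
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    img = make_color_checker_image(rgb, width=1920, padding_rate=0.01)
    assert img.shape[1] == 1920
    return img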
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Turned into a function because computing the arguments for
    `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
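# Usage sketch (illustrative addition) combining the two helpers above.
def _demo_centering():
    bg_img = np.zeros((1080, 1920, 3))
    fg_img = np.ones((480, 640, 3))
    st_pos = calc_st_pos_for_centering(get_size_from_image(bg_img),
                                       get_size_from_image(fg_img))
    assert st_pos == (640, 300)
    merge(bg_img, fg_img, st_pos)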
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
    print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x | 1,089 | 1,104 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# グラデーション方向設定
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
# 念のため1階調ずつの変化か確認
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# まずは水平1LINEのグラデーションを作る
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# ブロードキャストを利用して2次元に拡張する
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# np.uint16 にコンバート
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# 基本パラメータ算出
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# 24ループで1枚の画像に24パッチを描画
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の2倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
    [[ 0.17289286  0.08205728  0.05714562]
     [ 0.5680292   0.29250401  0.21951748]
     [ 0.10435534  0.19656108  0.32958666]
     [ 0.1008804   0.14839018  0.05327639]
     [ 0.22303549  0.2169701   0.43166537]
     [ 0.10715338  0.513512    0.41415978]
     [ 0.74639182  0.20020473  0.03081343]
     [ 0.05947812  0.10659045  0.39897686]
     [ 0.5673215   0.08485376  0.11945382]
     [ 0.11177253  0.04285397  0.14166202]
     [ 0.34250836  0.5062777   0.0557734 ]
     [ 0.79262553  0.35803886  0.025485  ]
     [ 0.01864598  0.05139665  0.28886469]
     [ 0.054392    0.29876719  0.07187681]
     [ 0.45628547  0.03075684  0.04092033]
     [ 0.85379178  0.56503558  0.01475575]
     [ 0.53533883  0.09006355  0.3047824 ]
     [-0.03662977  0.24753781  0.39824679]
     [ 0.91177068  0.91497623  0.89427332]
     [ 0.57973934  0.59203191  0.59370647]
     [ 0.35495537  0.36538027  0.36772001]
     [ 0.19009594  0.19180133  0.19316719]
     [ 0.08524707  0.08890587  0.09255774]
     [ 0.03038879  0.03118623  0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Extract only xyY and whitepoint; the rest of the data is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint        # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point   # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    Height is derived automatically from Width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Convenience wrapper, because computing the arguments for `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
shaper_func_log2_to_linear | Modeled on ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
For notes on the log2 space, see the description of shaper_func_linear_to_log2()
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385]) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test pattern generation tools for evaluation
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters was removed in SciPy >= 1.10
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Split length into div_num pieces.
    When a remainder appears, spread it out nicely with error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the fractional parts using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error leaves the last element short
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
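# Minimal usage sketch (not part of the original file): shows the
# error-diffusion split performed by equal_devision(). The expected list
# below was computed by hand from the algorithm above, so treat it as an
# assumption rather than source output.
def _demo_equal_devision():
    parts = equal_devision(10, 4)
    assert sum(parts) == 10  # the pieces always sum back to the input length
    print(parts)             # expected: [2, 3, 2, 3]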
def do_matrix(img, mtx):
"""
    Apply the matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
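# Minimal self-check sketch (not part of the original file): applying the
# identity matrix through do_matrix() must leave the image unchanged, which
# is a cheap test of the per-channel arithmetic above.
def _demo_do_matrix_identity():
    img = np.random.rand(4, 4, 3)
    assert np.allclose(do_matrix(img, np.identity(3)), img)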
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe outline (spectral locus) used to plot the chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the horseshoe
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
    primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values,
    normalizing the result in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    Raw values can sit at a low video level, so normalize and maximize the
    RGB values per pixel, when requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the horseshoe
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Fill the background with gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
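# Minimal sketch (not part of the original file): isolates the Delaunay
# inside/outside test that get_chromaticity_image() relies on for masking.
# Points inside a simplex get an index >= 0, points outside get -1.
def _demo_find_simplex_mask():
    triangle = Delaunay(np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]))
    pts = np.array([[0.25, 0.25], [0.9, 0.9]])
    print(triangle.find_simplex(pts))  # expected: [ 0 -1] (inside, outside)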
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping several shifted rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data for inside/outside classification of the region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # Convert back to XYZ once more, to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare the conversion to log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Fetch the chromaticity image and paste it at z=0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
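# Minimal usage sketch (not part of the original file): for grid_num=2 the
# grid degenerates to the 8 cube corners, R varying fastest, matching the
# order listed in the docstring above. Note the (1, grid_num**3, 3) shape
# that np.dstack produces.
def _demo_3d_grid():
    grid = get_3d_grid_cube_format(grid_num=2)
    print(grid.shape)   # expected: (1, 8, 3)
    print(grid[0, :2])  # expected: [[0. 0. 0.] [1. 0. 0.]]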
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    # assumed intent: return the sampled curve points so callers can use them
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a gradation pattern that changes in steps.
    With the right arguments it can change exactly one code value per step.
    # Caveat
    To build a gradation that changes exactly one code value per step,
    specify the parameters so that ```step_num = (2 ** bit_depth) + 1```.
    See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
    # Double-check that the steps really are one code value apart
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
    # First build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Broadcast it out to two dimensions
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
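# Minimal usage sketch (not part of the original file): an 8-bit ramp that
# steps exactly one code value per step requires step_num = 2**bit_depth + 1,
# as noted in the docstring above. The shape check is an assumption derived
# from the broadcasting logic.
def _demo_step_gradation():
    img = gen_step_gradation(width=1024, height=32, step_num=257,
                             bit_depth=8, color=(1.0, 1.0, 1.0),
                             direction='h')
    print(img.shape)  # expected: (32, 1024, 3)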
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    img_a is overwritten in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # build the High-Low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # repeat vertically and stack the even/odd pair
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
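# Minimal usage sketch (not part of the original file): the pattern is
# always square with side dot_size * 2 * repeat, alternating filled and
# empty dots in opposite phase between even and odd row blocks.
def _demo_dot_pattern():
    img = dot_pattern(dot_size=2, repeat=3)
    print(img.shape)  # expected: (12, 12, 3)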
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a composite dot pattern.
Parameters
----------
    kind_num : integer
        number of dot sizes to create.
        e.g. kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of repetitions of the full set of dot patterns.
        e.g. kind_num=3, whole_repeat=2 lays out the
        1-dot, 2-dot and 4-dot patterns twice horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Create a CSF-like pattern by overlapping several shifted rectangles.
    Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a tiled, checker-like stripe pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for spotting YCbCr coefficient errors.
    Honestly it is assembled rather crudely from roughly made patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern comes out landscape; the following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        This is an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
size : tuple
canvas size.
block_size : float
        Each block's size.
        This value is a ratio to the height of the canvas.
    padding : float
        Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding  # use the padding argument
    # Compute the basic layout parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto one image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Modeled on ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, shifted so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
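# Minimal round-trip sketch (not part of the original file): feeding the
# output of shaper_func_linear_to_log2() into shaper_func_log2_to_linear()
# (defined below the MASKED marker) should reproduce the input for values
# inside the exposure range. The chosen test values are assumptions.
def _demo_log2_shaper_roundtrip():
    x = np.array([0.01, 0.18, 1.0])
    y = shaper_func_linear_to_log2(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    x2 = shaper_func_log2_to_linear(
        y, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    print(np.allclose(x, x2))  # expected: True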
# MASKED: shaper_func_log2_to_linear function (lines 1150-1170)
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV only seems to support 8 bit, hence this hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter grows toward the lower right from pt1.
    Note that pt1 is not the center of the stroke.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline frame around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
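# Minimal usage sketch (not part of the original file): after drawing a
# 2 px outline, the corner pixels carry the foreground color while the
# interior stays untouched.
def _demo_draw_outline():
    img = np.zeros((16, 16, 3))
    draw_outline(img, fg_color=(1.0, 1.0, 1.0), outline_width=2)
    print(img[0, 0])  # expected: [1. 1. 1.]
    print(img[8, 8])  # expected: [0. 0. 0.]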
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to RGB code values.
    The luminance unit is [cd/m2]; the output is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
    [ 1.0 1.0 1.0 ]
    >>> convert_luminance_to_color_value(100, tf.ST2084)
    [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
    The luminance unit is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Compute the reordering table for color patches laid out radially on the
    CIELAB a*b* plane around a gray center, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The raw patch data is generated unordered; this function returns the
    conversion table that puts it into display order.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
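# Minimal usage sketch (not part of the original file): prints the
# reordering table for the 3x3 ring inside a 5x5 patch grid. The expected
# values were computed by hand from the code above, so treat them as an
# unverified assumption.
def _demo_rad_patch_idx():
    print(calc_rad_patch_idx2(outmost_num=5, current_num=3))
    # expected: [13  8  7  6 11 16 17 18]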
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Compute the RGB values of color patches on the CIELAB a*b* plane around
    a gray center, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet ordered; it has to be rearranged with the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Compute the RGB values of color patches on the CIELAB a*b* plane around
    a gray center, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **reordered**: the first entry is the
    green patch at the upper left of the image and the last entry is the
    purple patch at the lower right. To plot the patches, take values from
    the head of the list and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values whose acceleration goes 0→1→0 instead of a plain linear ramp.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
    [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
    >>> print(x1)
    [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values whose acceleration goes 0→1→0 instead of a plain linear ramp.
    The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
    [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
    >>> print(x2)
    [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values whose acceleration goes 0→1→0 instead of a plain linear ramp.
    The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values whose acceleration goes 0→1→0 instead of a plain linear ramp.
    The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
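# Minimal sanity sketch (not part of the original file): every accelerated
# curve starts at 0, ends at 1, and increases monotonically; nesting the
# sine only sharpens the S-shape. The checks follow from the math above.
def _demo_accelerated_x():
    for func in (get_accelerated_x_1x, get_accelerated_x_2x,
                 get_accelerated_x_4x, get_accelerated_x_8x):
        x = func(64)
        assert np.isclose(x[0], 0.0) and np.isclose(x[-1], 1.0)
        assert np.all(np.diff(x) >= 0)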
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
    [[ 0.17289286  0.08205728  0.05714562]
     [ 0.5680292   0.29250401  0.21951748]
     [ 0.10435534  0.19656108  0.32958666]
     [ 0.1008804   0.14839018  0.05327639]
     [ 0.22303549  0.2169701   0.43166537]
     [ 0.10715338  0.513512    0.41415978]
     [ 0.74639182  0.20020473  0.03081343]
     [ 0.05947812  0.10659045  0.39897686]
     [ 0.5673215   0.08485376  0.11945382]
     [ 0.11177253  0.04285397  0.14166202]
     [ 0.34250836  0.5062777   0.0557734 ]
     [ 0.79262553  0.35803886  0.025485  ]
     [ 0.01864598  0.05139665  0.28886469]
     [ 0.054392    0.29876719  0.07187681]
     [ 0.45628547  0.03075684  0.04092033]
     [ 0.85379178  0.56503558  0.01475575]
     [ 0.53533883  0.09006355  0.3047824 ]
     [-0.03662977  0.24753781  0.39824679]
     [ 0.91177068  0.91497623  0.89427332]
     [ 0.57973934  0.59203191  0.59370647]
     [ 0.35495537  0.36538027  0.36772001]
     [ 0.19009594  0.19180133  0.19316719]
     [ 0.08524707  0.08890587  0.09255774]
     [ 0.03038879  0.03118623  0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Extract only xyY and whitepoint; the rest of the data is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint        # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point   # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    Height is derived automatically from Width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Convenience wrapper, because computing the arguments for `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Modeled on ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    For notes on the log2 space, see the description of shaper_func_linear_to_log2()
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y | 1,150 | 1,170 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test pattern generation tools for evaluation
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters was removed in SciPy >= 1.10
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Split length into div_num pieces.
    When a remainder appears, spread it out nicely with error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the fractional parts using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error leaves the last element short
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
    Apply the matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe outline (spectral locus) used to plot the chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the horseshoe
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
    primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values,
    normalizing the result in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    Raw values can sit at a low video level, so normalize and maximize the
    RGB values per pixel, when requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the horseshoe
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Fill the background with gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping several shifted rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data for inside/outside classification of the region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, to obtain Y
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare for conversion to log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity image and paste it at the z=0 plane
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
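# A minimal sketch of the ordering: with grid_num=2 the R channel
# toggles fastest, which matches the CUBE 3DLUT convention.
def _demo_3d_grid_cube_format():
    grid = get_3d_grid_cube_format(grid_num=2)
    print(grid.shape)   # (1, 8, 3)
    print(grid[0, :2])  # [[0. 0. 0.] [1. 0. 0.]]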
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth
    # set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        temp = height
        height = width
        width = temp
    if (max_val + 1 != step_num):
        """
        Not a pattern that increases one code value per step.
        The last sample would be 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        The last sample would be 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num was passed in with an extra +1; undo it
        # double-check that each step changes by exactly one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")
    # first build a one-line horizontal gradation
    # -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to 2-D using broadcasting
    # ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
    # ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
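# A minimal usage sketch with synthetic data (the values are arbitrary
# and the ndarray-friendliness of the local `transfer_functions` module
# is assumed). The blend itself happens in linear light:
# out = (1 - alpha) * bg + fg.
def _demo_merge_with_alpha():
    bg = np.ones((64, 64, 3)) * 0.25
    fg = np.zeros((32, 32, 4))
    fg[..., 0] = 0.75  # red-ish foreground
    fg[..., 3] = 1.0   # fully opaque alpha
    out = merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(16, 16))
    print(out.shape)  # (64, 64, 3)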
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
    # generate the High/Low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # compute the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto one image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, offset so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # normalize into the min/max exposure range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
    y_normalized = np.maximum(y_normalized, 0.0)  # also works for scalars
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
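# A quick round-trip sanity check for the two shaper functions above,
# using the default mid_gray/exposure parameters.
def _demo_shaper_round_trip():
    x = np.array([0.0, 0.5, 1.0])
    linear = shaper_func_log2_to_linear(x)
    restored = shaper_func_linear_to_log2(linear)
    print(np.allclose(x, restored))  # True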
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
    # top and left edges
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
    # right and bottom edges
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into an achromatic RGB code value triplet.
    The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into a code value.
    The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Compute the conversion table used to reorder the color patches that
    sit on the a*-b* plane of CIELAB around a gray center, like this:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The patch data comes back unordered and must be rearranged with the
    conversion table returned by calc_rad_patch_idx2().
    This function performs exactly that index computation.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get the list of RGB values for color patches on the a*-b* plane of
    CIELAB with a gray center, like this:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The data comes back unordered and must be rearranged with the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get the list of RGB values for color patches on the a*-b* plane of
    CIELAB with a gray center, like this:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **reordered**: the first entry is
    the green patch at the upper-left of the image and the last entry
    is the purple patch at the lower-right. So when plotting, take the
    values from the head of the list and lay them out towards the
    lower-right.
"""
    patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create an x whose speed accelerates and then decelerates
    (the acceleration goes 0 -> 1 -> 0) instead of increasing linearly.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create an x whose speed accelerates and then decelerates
    (the acceleration goes 0 -> 1 -> 0) instead of increasing linearly.
    The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create an x whose speed accelerates and then decelerates
    (the acceleration goes 0 -> 1 -> 0) instead of increasing linearly.
    The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create an x whose speed accelerates and then decelerates
    (the acceleration goes 0 -> 1 -> 0) instead of increasing linearly.
    The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some fields are not needed here, so extract only the xyY data
    # and the whitepoint
    # -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is computed from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
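# A minimal end-to-end sketch: linear ColorChecker values -> simple
# gamma encode -> patch image. The 1/2.4 encode is an assumption here,
# not part of the original pipeline.
def _demo_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value()
    rgb_dash = np.clip(rgb_linear, 0.0, 1.0) ** (1 / 2.4)
    img = make_color_checker_image(rgb_dash, width=1920)
    print(img.shape)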
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple (int)
        (st_pos_h, st_pos_v)
    Examples
    --------
    >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    >>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Return (width, height) of img. Wrapped into a function because
    computing the arguments of `calc_st_pos_for_centering()` was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
draw_straight_line | Draw a straight line. OpenCV seems to support only 8-bit images, so this is a hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends to the lower-right of pt1.
Note that pt1 is not the center of the stroke.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of test-pattern generation tools for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
        # the original call was `cv2.resizeWindow('preview', )`, which
        # raises a TypeError; resizing to the image size is assumed here.
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    Fractional remainders are distributed with error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the fractions using error diffusion
    # -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where the last element misses the +1
    # due to floating point error
    # -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final sanity check
    # -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
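# A quick sketch of the error-diffusion division: 10 split three ways
# gives pieces that differ by at most 1 and still sum to the input.
def _demo_equal_devision():
    parts = equal_devision(10, 3)
    print(parts, sum(parts))  # [3, 3, 4] 10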
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
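# do_matrix() is an unrolled per-pixel 3x3 matrix multiply; a sketch
# showing that it agrees with the einsum formulation.
def _demo_do_matrix():
    img = np.random.rand(4, 4, 3)
    mtx = np.random.rand(3, 3)
    ref = np.einsum('ij,...j->...i', mtx, img)
    print(np.allclose(do_matrix(img, mtx), ref))  # True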
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe outline used when
    plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # basic parameters
    # ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # compute the xy values of the horseshoe
    # --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    A sensible normalization is applied as well.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : numeric
        the Y value used when normalize='specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
        rgb[rgb < 0] = 0
        rgb[rgb > 1.0] = 1.0
return rgb
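# A minimal usage sketch: the D65 white point should come out close to
# RGB = (1, 1, 1) after the per-pixel maximization.
def _demo_xy_to_rgb():
    xy = np.array([[[0.3127, 0.3290]]])
    rgb = xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum')
    print(rgb)  # approximately [[[1. 1. 1.]]]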
def get_white_point(name):
"""
    Get the white point, based on CIE1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
#     # default values for the keyword arguments
#     # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
#     # prepare the data for plotting
#     # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe
    # --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias the mask so that the alpha channel is smooth
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive reversal)
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # restore colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build the data used to decide the inside of the gamut region
    # --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias the mask so that the alpha channel is smooth
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive reversal)
    # --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # restore colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, to obtain Y
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare for conversion to log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity image and paste it at the z=0 plane
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth
    # set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        temp = height
        height = width
        width = temp
    if (max_val + 1 != step_num):
        """
        Not a pattern that increases one code value per step.
        The last sample would be 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        The last sample would be 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num was passed in with an extra +1; undo it
        # double-check that each step changes by exactly one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")
    # first build a one-line horizontal gradation
    # -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to 2-D using broadcasting
    # ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
    # ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
    # generate the High/Low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # compute the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto one image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, offset so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # normalize into the min/max exposure range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
    y_normalized = np.maximum(y_normalized, 0.0)  # also works for scalars
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Implemented with reference to ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
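# Added sketch (not part of the original source): the two shaper functions
# above are inverses of each other on the covered exposure range, which can
# be verified numerically.
def _check_shaper_round_trip_sketch():
    x = get_log2_x_scale(sample_num=8, min_exposure=-6.5, max_exposure=6.5)
    y = shaper_func_linear_to_log2(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    x2 = shaper_func_log2_to_linear(
        y, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    assert np.allclose(x, x2)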
# MASKED: draw_straight_line function (lines 1173-1224)
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline along the edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to achromatic RGB code values.
The unit of luminance is [cd/m2]; the result is neutral (R=G=B).
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
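# Added sketch (not part of the original source): the ST2084 case of
# tf.oetf_from_luminance() can be written out explicitly as the SMPTE
# ST 2084 (PQ) inverse EOTF. Constant names follow the standard.
def _st2084_oetf_from_luminance_sketch(luminance):
    m1 = 2610 / 16384
    m2 = 2523 / 4096 * 128
    c1 = 3424 / 4096
    c2 = 2413 / 4096 * 32
    c3 = 2392 / 4096 * 32
    y = np.asarray(luminance) / 10000  # normalize to the 10000 cd/m2 peak
    y_m1 = y ** m1
    return ((c1 + c2 * y_m1) / (1 + c3 * y_m1)) ** m2
# _st2084_oetf_from_luminance_sketch(100) gives about 0.50807842, matching
# the ST2084 example in convert_luminance_to_color_value().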
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Get the conversion table used to reorder the color patches laid out on the
CIELAB a*-b* plane around a gray center, as shown here:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The patch data obtained elsewhere is not yet sorted for display and has to
be rearranged with the conversion table from calc_rad_patch_idx2().
This function performs exactly that conversion.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
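# Added check (not part of the original source): for the 3x3 ring inside a
# 5x5 grid, the eight radially ordered patches map to these flattened 5x5
# positions, starting at angle 0 (center-right of the ring) and walking
# around it.
def _check_rad_patch_idx2_example():
    expected = np.array([13, 8, 7, 6, 11, 16, 17, 18])
    assert np.array_equal(
        calc_rad_patch_idx2(outmost_num=5, current_num=3), expected)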
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Get a list of RGB values for color patches on the CIELAB a*-b* plane with
gray at the center, as shown here:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted for display; it has to be rearranged
with the conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get a list of RGB values for color patches on the CIELAB a*-b* plane with
gray at the center, as shown here:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted** so that the first entry is the
green patch at the upper left of the image and the last entry is the
purple patch at the lower right. To plot the patches, take values from the
head of the list in order and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
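# The four functions above repeat the same sine-based warp. A generalized
# sketch (an added illustration, not part of the original source) makes the
# pattern explicit; order=1..4 reproduces the 1x/2x/4x/8x variants.
def _get_accelerated_x_nx_sketch(sample_num=64, order=1):
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(order - 1):
        # each extra warp doubles the peak "acceleration"
        rad = np.sin(rad) * 0.5 * np.pi
    x = (np.sin(rad) + 1) / 2
    return x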
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Extract only xyY and whitepoint; the rest of the data is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = target_white
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
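# Usage sketch (added illustration, not part of the original source):
# combine the linear ColorChecker values with an OETF and the image builder
# below. tf.oetf() is assumed to accept values clipped to the [0, 1] range.
def _demo_color_checker_image_sketch():
    cc_rgb_linear = generate_color_checker_rgb_value()
    cc_rgb = tf.oetf(np.clip(cc_rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(cc_rgb, width=1920, padding_rate=0.01)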
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 ColorChecker image.
The height is computed automatically from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
# Calculate the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Wraps the argument calculation for `calc_st_pos_for_centering()`, which was tedious to do by hand.
"""
return (img.shape[1], img.shape[0])
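# Added usage sketch (not part of the original source): center a foreground
# patch on a background canvas using calc_st_pos_for_centering(),
# get_size_from_image() and merge().
def _demo_centered_merge_sketch():
    bg_img = np.zeros((1080, 1920, 3))
    fg_img = np.ones((480, 640, 3)) * 0.5
    st_pos = calc_st_pos_for_centering(
        bg_size=get_size_from_image(bg_img),
        fg_size=get_size_from_image(fg_img))
    merge(bg_img, fg_img, pos=st_pos)
    return bg_img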
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV seems to support only 8-bit images, so this is a custom implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends from pt1 toward the lower right.
Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color | 1,173 | 1,224 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num parts.
If there is a remainder, spread it out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply error diffusion to make the parts add up
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where rounding error keeps the last element from getting +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
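# Added sketch (not part of the original source): equal_devision() splits a
# length into integer parts whose sum is exactly the original length,
# spreading the fractional remainder with error diffusion.
def _demo_equal_devision_sketch():
    parts = equal_devision(100, 3)  # e.g. [33, 33, 34]
    assert sum(parts) == 100
    return parts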
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
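# Added note (illustrative, not part of the original source): do_matrix() is
# the unrolled form of a per-pixel 3x3 matrix multiply; the same result can
# be written compactly with einsum.
def _do_matrix_einsum_sketch(img, mtx):
    # out[..., i] = sum_j mtx[i][j] * img[..., j]
    return np.einsum('...j,ij->...i', img, np.asarray(mtx))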
def _get_cmfs_xy():
"""
Get the xy values of the horseshoe-shaped outline (spectral locus) used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Calculate the xy values of the spectral locus
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Calculate RGB values from xy values.
The values are also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have a low video level, so normalize and maximize
the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Set default values for keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate an image of the horseshoe-shaped region of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB was narrow and looked slightly odd, so a wider gamut
(BT.2020 originally, ACEScg in the code below) is used. The downside is
that the colors become slightly pale; worth improving given the time.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Calculate the xy values of the spectral locus
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
To decide whether a point is inside or outside the horseshoe, the region
is split into triangles (a Delaunay triangulation is built). Once the
triangulation exists, inside/outside can be determined with cross-product
tests (probably). The resulting triangulation can be plotted with the code
below. One note: the third argument of ```plt.triplot``` is a list of
**indices** into the first two arguments that defines the triangles,
e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` returns the index of the simplex that
contains each xy point. A return value of ```-1``` means the point is not
inside the region, so a mask can be built from the entries below zero.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert (negative to positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have a low video level, so normalize and maximize
the RGB values per pixel.
"""
rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
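# Added sketch (not part of the original source): the inside/outside test
# used above, reduced to its essentials. Points outside the spectral locus
# get find_simplex() == -1.
def _demo_point_in_locus_mask_sketch():
    cmf_xy = _get_cmfs_xy()
    triangulation = Delaunay(cmf_xy)
    pts = np.array([[0.3127, 0.3290], [0.8, 0.8]])  # D65 is inside
    return triangulation.find_simplex(pts) >= 0  # [True, False]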
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlapping several offset rectangles.
Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY plot like the ones in SONY's HDR explanation materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Create data for the inside/outside test of the horseshoe region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert (negative to positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw result can have a low video level, so normalize and maximize
the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# Convert back to XYZ once more, to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare for conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Returns an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Convenient when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
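# Added sketch (not part of the original source): write RGB data laid out
# in the order produced by get_3d_grid_cube_format() as a minimal .cube
# 3DLUT. The header keyword and the red-fastest ordering follow the common
# Adobe/Resolve .cube convention; the file name is just a placeholder.
def _write_cube_file_sketch(lut_rgb, grid_num, file_name="sample.cube"):
    data = np.asarray(lut_rgb).reshape((-1, 3))
    with open(file_name, "w") as f:
        f.write("LUT_3D_SIZE {}\n".format(grid_num))
        for rgb_value in data:
            f.write("{:.10f} {:.10f} {:.10f}\n".format(*rgb_value))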
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y  # the original computed x and y but never returned them
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a step-wise gradation pattern.
By tuning the arguments, a pattern that changes by exactly one code value
per step can also be created.
# Notes
To create a gradation that changes by exactly one code value per step,
specify the parameters so that ```step_num = (2 ** bit_depth) + 1```
holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
Pattern that does not increase one code value at a time.
The final sample would be 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
Pattern that changes by exactly one code value per step.
The final sample would be 256 or 1024, so it is dropped.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1  # step_num was passed one larger than needed, so decrement it
# Just in case, verify the change really is one code value per step
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First build a one-pixel-high horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
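# Added usage sketch (not part of the original source): a gradation that
# increases by exactly one 10-bit code value per step, following the Notes
# in gen_step_gradation().
def _demo_exact_10bit_ramp_sketch():
    return gen_step_gradation(
        width=1024, height=64, step_num=1025, bit_depth=10,
        color=(1.0, 1.0, 1.0), direction='h')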
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a.
img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Composite fg_img over bg_img using the alpha channel.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
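# Added note (illustrative, not part of the original source):
# merge_with_alpha() implements the standard "over" operator with a
# premultiplied foreground, computed in linear light. A minimal standalone
# form of the blend itself:
def _over_premultiplied_sketch(bg_linear, fg_linear, alpha):
    return fg_linear + (1.0 - alpha) * bg_linear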
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# Number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
# Generate the High-Low logic arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# Apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# Copy vertically and join the even and odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Create a composite dot pattern.
Parameters
----------
kind_num : integer
number of dot sizes to create.
For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
number of repetitions of the full set of dot patterns.
For example, kind_num=3 and whole_repeat=2 lay out two sets of the
1-dot, 2-dot and 4-dot patterns both horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
for the 3840x2160 HDR test pattern only; halves the width and height
(note: not present in the current signature).
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlapping several offset rectangles.
Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Create a checkerboard-style tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Create a test pattern for catching YCbCr coefficient errors.
Honestly, the construction is quite messy: roughly made patterns are crudely fused together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern is wider than it is tall. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
This is an optional parameter.
If you want to draw two different ColorCheckers,
set the second set of RGB values here.
size : tuple
canvas size.
block_size : float
Each block's size.
This value is ratio to height of the canvas.
padding : float
Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = padding
# Calculate the basic layout parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# 24ループで1枚の画像に24パッチを描画
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Implemented with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offsetting so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized = np.maximum(y_normalized, 0.0)  # scalar-safe clipping at 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Implemented with reference to ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV seems to support only 8-bit images, so this is a custom implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter extends from pt1 toward the lower right.
Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline along the edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to achromatic RGB code values.
The unit of luminance is [cd/m2]; the result is neutral (R=G=B).
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Get the conversion table used to reorder the color patches laid out on the
CIELAB a*-b* plane around a gray center, as shown here:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The patch data obtained elsewhere is not yet sorted for display and has to
be rearranged with the conversion table from calc_rad_patch_idx2().
This function performs exactly that conversion.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Get a list of RGB values for color patches on the CIELAB a*-b* plane with
gray at the center, as shown here:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted for display; it has to be rearranged
with the conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get a list of RGB values for color patches on the CIELAB a*-b* plane with
gray at the center, as shown here:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted** so that the first entry is the
green patch at the upper left of the image and the last entry is the
purple patch at the lower right. To plot the patches, take values from the
head of the list in order and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values that do not increase at a uniform rate; the "acceleration" goes 0 -> 1 -> 0.
The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Extract only xyY and whitepoint; the rest of the data is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = target_white
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 ColorChecker image.
The height is computed automatically from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
# Calculate the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Wraps the argument calculation for `calc_st_pos_for_centering()`, which was tedious to do by hand.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
draw_outline | Draw an outline along the edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num parts.
If there is a remainder, spread it out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply error diffusion to make the parts add up
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where rounding error keeps the last element from getting +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
Get the xy values of the horseshoe-shaped outline (spectral locus) used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Calculate the xy values of the spectral locus
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw result can have low video levels, so normalize and maximize
    the RGB values per pixel (when requested).
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Get the white point (CIE 1931 based).
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# #     Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# #     Prepare the plotting data
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB looked too narrow and slightly odd, so a wider
    gamut is used instead (currently ACEScg; see below). The downside is
    that the colors become a little desaturated; worth improving someday.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To classify points as inside or outside the horseshoe, the region is
    split into triangles (a Delaunay triangulation). Once the
    triangulation exists, inside/outside can be judged with cross-product
    tests (probably). The resulting triangulation can be plotted with the
    code below. One note: the third argument of ```plt.triplot``` is a
    list of **indices** into the first two arguments that form the
    triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` reports which simplex contains
    each xy point. A return value of ```-1``` means the point is outside
    any simplex, so a mask can be built from the entries below zero.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw result can have low video levels, so normalize and maximize
    the RGB values per pixel.
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Fill the background with gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
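# Illustrative usage (added comment, not in the original file): render the
# diagram and save it as a 16-bit TIFF, flipping RGB to BGR for OpenCV.
#   >>> diagram = get_chromaticity_image(samples=512)
#   >>> cv2.imwrite("chromaticity.tiff",
#   ...             np.uint16(np.round(diagram[..., ::-1] * 0xFFFF)))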
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping several offset rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16bit.
    lv2 : numeric
        video level 2. this value must be 16bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
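# Illustrative usage (added comment, not in the original file): the
# defaults yield a 16-bit image alternating between the two video levels.
#   >>> img = get_csf_color_image(width=640, height=480, stripe_num=18)
#   >>> img.dtype, img.shape
#   (dtype('uint16'), (480, 640, 3))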
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like those found in Sony's HDR explanatory material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data for region (inside/outside gamut) classification
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw result can have low video levels, so normalize and maximize
    the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # Convert back to XYZ once more, to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for the log-scale conversion
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at z=0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
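# Illustrative check (added comment, not in the original file):
# grid_num=2 gives the 8 corners of the RGB cube, R varying fastest.
#   >>> get_3d_grid_cube_format(grid_num=2).shape
#   (1, 8, 3)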
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a staircase gradation pattern.
    By adjusting the arguments, a pattern that changes by exactly one
    code value per step can also be generated.
    # Notes
    To get a gradation that changes by exactly one code value per step,
    choose the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
        Not the one-code-value-per-step case.
        The last value would reach 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
        The exactly-one-code-value-per-step case.
        The last value would reach 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # step_num arrives with an extra +1, so subtract it
        # Double-check that the steps really change by one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # First, build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
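# Illustrative usage (added comment, not in the original file): paste a
# 100x100 white square onto a black canvas at (h=50, v=20).
#   >>> canvas = np.zeros((1080, 1920, 3))
#   >>> merge(canvas, np.ones((100, 100, 3)), pos=(50, 20))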
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img onto bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : str
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
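# Illustrative usage (added comment, not in the original file): composite
# a fully opaque RGBA foreground onto an sRGB-encoded background.
#   >>> bg = np.ones((1080, 1920, 3)) * 0.1
#   >>> fg = np.zeros((100, 100, 4))
#   >>> fg[..., 3] = 1.0  # alpha channel: fully opaque
#   >>> out = merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(50, 50))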
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # Number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # Build the High-Low logic arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Repeat vertically and stack the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
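# Illustrative check (added comment, not in the original file):
# dot_size=4, repeat=4 yields a 32x32 checkerboard of 4x4 dots.
#   >>> dot_pattern(dot_size=4, repeat=4).shape
#   (32, 32, 3)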
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a dot pattern.
Parameters
----------
kind_num : integer
        Number of dot sizes to generate.
        For example, kind_num=3 creates 1-dot, 2-dot, and 4-dot patterns.
    whole_repeat : integer
        Number of sets of the multi-size dot patterns.
        For example, kind_num=3 with whole_repeat=2 lays out two sets of
        the 1-dot/2-dot/4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
    reduce : bool
        Dedicated to the 3840x2160 HDR test pattern; halves both
        dimensions. (Not present in the current signature.)
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
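# Illustrative size check (added comment, not in the original file): with
# kind_num=3 and whole_repeat=2, every sub-block is 16 px and the result
# is square.
#   >>> complex_dot_pattern(kind_num=3, whole_repeat=2).shape
#   (192, 192, 3)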
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Create a CSF-like pattern by overlapping several offset rectangles.
    Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a checkerboard-like tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
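# Illustrative usage (added comment, not in the original file): 10-bit
# checkerboard tiles; divide by 1023 when previewing as float.
#   >>> tiles = make_tile_pattern(width=480, height=960,
#   ...                           h_tile_num=4, v_tile_num=4)
#   >>> tiles.shape
#   (960, 480, 3)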
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for checking YCbCr coefficient errors.
    Honestly, the construction is quite messy: roughly made patterns are
    mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern comes out wide; the following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        An optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values here.
size : tuple
canvas size.
block_size : float
        Size of each block,
        given as a ratio of the canvas height.
padding : float
        Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
    # Compute the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto a single image in one loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV appears to support only 8-bit images,
    hence this hand-rolled implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness extends from pt1 toward the lower right.
    Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
# MASKED: draw_outline function (lines 1227-1271)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to RGB code values.
    The result is achromatic (R = G = B).
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Get the index table for a color-patch layout whose center is gray and
    whose surrounding patches lie on the a*b* plane of CIELAB space:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The raw patch data is not yet sorted and must be reordered with the
    conversion table produced by calc_rad_patch_idx2().
    This function computes exactly that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
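# Illustrative check (added comment, not in the original file): for a
# 3x3 block the table maps the 8 radial patches (angular order) onto
# raster order.
#   >>> calc_rad_patch_idx2(outmost_num=3, current_num=3)
#   array([5, 2, 1, 0, 3, 6, 7, 8])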
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get a list of RGB values for color patches whose center is gray and
    whose surroundings lie on the a*b* plane of CIELAB space:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet sorted; it must be reordered with the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get a list of RGB values for color patches whose center is gray and
    whose surroundings lie on the a*b* plane of CIELAB space:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry is
    the green patch at the upper left of the image and the last entry is
    the purple patch at the lower right. To plot the patches, take entries
    from the head of the list and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x whose rate of change goes 0 → max → 0 (ease-in/ease-out)
    instead of increasing uniformly.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x whose rate of change goes 0 → max → 0 (ease-in/ease-out)
    instead of increasing uniformly.
    Twice the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x whose rate of change goes 0 → max → 0 (ease-in/ease-out)
    instead of increasing uniformly.
    Four times the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x whose rate of change goes 0 → max → 0 (ease-in/ease-out)
    instead of increasing uniformly.
    Eight times the acceleration of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Extract only xyY and the whitepoint; the rest is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint       # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is derived from the width; it shifts slightly with
    padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
    # Compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
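# Illustrative usage (added comment, not in the original file): build
# linear ColorChecker values, encode them with an OETF, then lay them out.
#   >>> rgb_linear = generate_color_checker_rgb_value()
#   >>> rgb_code = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
#   >>> cc_img = make_color_checker_image(rgb_code, width=1920)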
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
(width, height) of the background image.
    fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Helper that returns (width, height) of `img`, handy for feeding
    `calc_st_pos_for_centering()`.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline frame on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width) | 1,227 | 1,271 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters was removed in SciPy 1.8
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
        # assumed window size: match the image (width, height)
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    Any remainder is distributed sensibly using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the fractional parts using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error keeps the last element from getting its +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe-shaped spectral locus outline
    used when plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw result can have low video levels, so normalize and maximize
    the RGB values per pixel (when requested).
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Get the white point (CIE 1931 based).
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# #     Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# #     Prepare the plotting data
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB looked too narrow and slightly odd, so a wider
    gamut is used instead (currently ACEScg; see below). The downside is
    that the colors become a little desaturated; worth improving someday.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To classify points as inside or outside the horseshoe, the region is
    split into triangles (a Delaunay triangulation). Once the
    triangulation exists, inside/outside can be judged with cross-product
    tests (probably). The resulting triangulation can be plotted with the
    code below. One note: the third argument of ```plt.triplot``` is a
    list of **indices** into the first two arguments that form the
    triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` reports which simplex contains
    each xy point. A return value of ```-1``` means the point is outside
    any simplex, so a mask can be built from the entries below zero.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw result can have low video levels, so normalize and maximize
    the RGB values per pixel.
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Fill the background with gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping several offset rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16bit.
    lv2 : numeric
        video level 2. this value must be 16bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like those found in Sony's HDR explanatory material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data for region (inside/outside gamut) classification
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw result can have low video levels, so normalize and maximize
    the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # Convert back to XYZ once more, to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for the log-scale conversion
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at z=0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a staircase gradation pattern.
    By adjusting the arguments, a pattern that changes by exactly one
    code value per step can also be generated.
    # Notes
    To get a gradation that changes by exactly one code value per step,
    choose the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
        Not the one-code-value-per-step case.
        The last value would reach 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
        The exactly-one-code-value-per-step case.
        The last value would reach 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # step_num arrives with an extra +1, so subtract it
        # Double-check that the steps really change by one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # First, build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img onto bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : str
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# Compute the basic layout parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto a single image
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV seems to support only 8 bit, so this is a self-made replacement.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness extends from pt1 towards the lower right;
note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outer border on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to an achromatic RGB code value triplet.
The luminance unit is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
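# Illustrative example (not part of the original file): for the inner ring
# of a 5x5 layout, the eight ring patches (ordered by angle) map to these
# positions in the flattened 5x5 grid:
# >>> calc_rad_patch_idx2(outmost_num=5, current_num=3)
# array([13,  8,  7,  6, 11, 16, 17, 18])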
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0 instead of increasing at constant speed.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0.
The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0.
The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0.
The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # target white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
6x4 の カラーチェッカーの画像を作る。
Height は Width から自動計算される。padding_rate で少し値が変わる。
"""
h_patch_num = 6
v_patch_num = 4
# Compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
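# Illustrative usage (not part of the original file): build a gamma 2.4
# BT.709 ColorChecker image from the linear values generated above.
# >>> rgb_linear = generate_color_checker_rgb_value()
# >>> rgb_gamma = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
# >>> cc_img = make_color_checker_image(rgb_gamma, width=1920)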
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Helper for `calc_st_pos_for_centering()`; computing its arguments by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
calc_rad_patch_idx2 | Get a list of RGB values for color patches with gray at the center and
the surrounding patches on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The data obtained there is not yet reordered and must be converted with
the lookup table produced by calc_rad_patch_idx2();
this function performs exactly that conversion. | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
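# Illustrative usage (not part of the original file): the remainder is
# spread across the pieces and the total is always preserved.
# >>> equal_devision(10, 3)
# [3, 3, 4]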
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
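# Illustrative note (not in the original file): do_matrix() is a per-pixel
# matrix product, equivalent to a single einsum call:
# >>> out = np.einsum('ij,...j->...i', np.asarray(mtx), img)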
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Get the chromaticity coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Convert xy values to RGB values.
The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
specific : float
the Y value (of xyY) used when normalize == 'specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if (np.sum(rgb > 1.0) > 0):
print("warning: overflow has occurred at xy_to_rgb")
if (np.sum(rgb < 0.0) > 0):
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
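# Illustrative usage (not part of the original file): the D65 white point
# maps to roughly equal RGB channels, ~[1, 1, 1] with normalize='maximum'.
# >>> xy_to_rgb(np.array([0.3127, 0.3290]), name='ITU-R BT.2020')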
def get_white_point(name):
"""
Get the white point of a color space, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the chromaticity coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the spectral locus
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias so that the alpha channel is smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video levels can be rather low, so normalize & maximize the
RGB values per pixel.
"""
rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Set the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Build the data for inside/outside tests of the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias so that the alpha channel is smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# Convert back to XYZ once more, to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare the conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
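# Illustrative usage (not part of the original file): R varies fastest,
# which matches the sample ordering of .cube 3DLUT files.
# >>> get_3d_grid_cube_format(grid_num=2).shape
# (1, 8, 3)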
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
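    # NOTE (assumption, not in the original): the body above computes x and y
    # but never returns them; returning the sampled coordinates makes the
    # Bezier helper usable by callers.
    return x, y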
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max_val = 2 ** bit_depth
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
width, height = height, width
if (max_val + 1 != step_num):
# Not a one-code-value-per-step pattern.
# Subtract 1 so the final value does not become 256 or 1024.
val_list = np.linspace(0, max_val, step_num)
val_list[-1] -= 1
else:
# A pattern that changes by exactly one code value per step.
# Drop the final value because it would be 256 or 1024.
val_list = np.linspace(0, max_val, step_num)[0:-1]
step_num -= 1  # step_num was passed with an extra +1, so subtract it
# Verify, just in case, that each step changes by one code value
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First, build a one-pixel-tall horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to two dimensions using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
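# Illustrative note (not in the original file): merge_with_alpha() is the
# standard "over" compositing operator evaluated in linear light,
#     out = (1 - alpha) * bg + fg
# where fg is assumed to carry premultiplied alpha; that is why both
# images are linearized with the EOTF before blending.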
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# Compute the basic layout parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto a single image
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Log2_to_Lin_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
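# Illustrative round trip (not part of the original file): the two shaper
# functions are inverses of each other over the covered range.
# >>> lin = shaper_func_log2_to_linear(np.array([0.0, 0.5, 1.0]))
# >>> shaper_func_linear_to_log2(lin)
# array([ 0. ,  0.5,  1. ])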
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV seems to support only 8 bit, so this is a self-made replacement.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness extends from pt1 towards the lower right;
note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outer border on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to an achromatic RGB code value triplet.
The luminance unit is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
# MASKED: calc_rad_patch_idx2 function (lines 1299-1334)
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0 instead of increasing at constant speed.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values whose acceleration goes 0 -> 1 -> 0.
The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0→1→0.
    The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0→1→0.
    The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
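# The three variants above nest the same sine warp one, two and three
# times. A hypothetical generalization (not part of the original API)
# could derive the warp count from the factor n; sketch, assuming the
# same conventions as the functions above:
def get_accelerated_x_nx(sample_num=64, n=1):
    """
    Same as get_accelerated_x_1x() but with the sine warp applied
    log2(n) extra times (n = 1, 2, 4, 8, ...).
    """
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(int(np.log2(n))):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2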
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some of the source data is unnecessary here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 color checker image.
    The height is computed from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
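# End-to-end sketch: build and save a BT.709 color checker image using the
# two functions above. The output file name is only an example.
def _demo_make_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb_linear = np.clip(rgb_linear, 0.0, 1.0)  # patch 18 can go negative
    rgb = tf.oetf(rgb_linear, tf.GAMMA24)
    img = make_color_checker_image(rgb, width=1920, padding_rate=0.01)
    cv2.imwrite("color_checker_example.png",
                np.uint8(np.round(img[..., ::-1] * 0xFF)))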
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Convenience wrapper: computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Get the conversion table used to reorder the radial color patch data
    on the CIELAB a*b* plane (gray at the center), as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The data from _calc_rgb_from_same_lstar_radial_data() is not yet
    reordered and needs the conversion table this function produces.
    This function performs exactly that conversion.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx | 1,299 | 1,334 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
    if over_disp:
        # the original call was missing its size arguments; assume the image size
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    Fractional remainders are spread out using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding error leaves the final element short
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
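# Usage sketch for equal_devision(): 100 split into 7 pieces sums back to
# exactly 100, the remainder being spread by the error diffusion above.
def _demo_equal_devision():
    pieces = equal_devision(100, 7)
    print(pieces)  # [14, 14, 14, 15, 14, 14, 15]
    assert sum(pieces) == 100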
def do_matrix(img, mtx):
"""
    Apply the matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
    Get the xy values of the horseshoe-shaped outline (the spectral
    locus) used to plot the chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized appropriately.
    Parameters
    ----------
    xy : array_like
        xy value.
    name : string
        color space name.
    normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : float
        the Y value used when normalize == 'specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video level can be rather low, so normalize/maximize the RGB
    values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # set defaults for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting: sRGB looked too narrow and a little odd, so a wider
    gamut is used instead. The drawback is that the colors become
    slightly pale; improve it when time permits.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To tell whether a point lies inside or outside the horseshoe, split
    the region into triangles (build a Delaunay triangulation). With the
    triangulation, inside/outside can be decided with cross-product tests
    (probably). The triangulation can be plotted with the code below.
    One note: the third argument of ```plt.triplot``` is a list of
    **indices** into the first two arguments that defines the triangles,
    like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
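# Usage sketch for get_chromaticity_image(); the output file name is only
# an example.
def _demo_get_chromaticity_image():
    img = get_chromaticity_image(samples=512)
    cv2.imwrite("chromaticity_example.png",
                np.uint8(np.round(img[..., ::-1] * 0xFF)))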
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Make a CSF-like pattern by stacking several shifted rectangles.
    The input signal level is limited to 16 bit.
    Parameters
    ----------
    width : numeric.
        width of the pattern image.
    height : numeric.
        height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
    stripe_num : numeric
        number of the stripes.
    Returns
    -------
    array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Make an xyY 3-D plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build the region-classification data (Delaunay over the primaries)
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # anti-alias so that the alpha channel becomes smooth
    # ------------------------------------------------
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(float)
        kernel /= np.sum(kernel)
        mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video level can be rather low, so normalize/maximize the RGB
    values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, in order to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare for the log-scale conversion
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity image and paste it at z=0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Return an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
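# Usage sketch for get_3d_grid_cube_format(): with grid_num=2 the result is
# the 8 corners of the RGB cube, with R varying fastest (CUBE order).
def _demo_get_3d_grid_cube_format():
    lut = get_3d_grid_cube_format(grid_num=2)
    print(lut.shape)  # (1, 8, 3)
    print(lut[0, :4])
    # [[0. 0. 0.]
    #  [1. 0. 0.]
    #  [0. 1. 0.]
    #  [1. 1. 0.]]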
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    """
    Evaluate the quadratic Bezier curve defined by p0, p1 and p2 at t.
    """
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Make a step gradation pattern.
    With suitable arguments it can also produce a pattern that changes by
    exactly one code value per step.
    # Notes
    To get a gradation that changes by exactly one code value per step,
    specify the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
    if (max + 1 != step_num):
        """
        Not a one-code-value-per-step pattern.
        The last value would be 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max, step_num)
        val_list[-1] -= 1
    else:
        """
        A pattern that changes by exactly one code value per step.
        The last value would be 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # step_num arrives +1 too large, so decrement it
        # double-check that the values really change one level at a time
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # first, build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to 2-D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
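# Usage sketch for gen_step_gradation(): a 10-bit ramp with step_num=1025
# increases by exactly one code value per step.
def _demo_gen_step_gradation():
    img = gen_step_gradation(width=1024, height=64, step_num=1025,
                             bit_depth=10, color=(1.0, 1.0, 1.0),
                             direction='h')
    print(img.shape)  # (64, 1024, 3)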
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
        transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # build the High-Low logic arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # replicate vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
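# Usage sketch for dot_pattern(): dot_size=2 and repeat=3 gives a 12x12
# checkerboard made of 2x2 blocks.
def _demo_dot_pattern():
    img = dot_pattern(dot_size=2, repeat=3)
    print(img.shape)  # (12, 12, 3)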
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Make a composite dot pattern with several dot sizes.
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make a CSF-like pattern by stacking several shifted rectangles.
    The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a checkerboard-like tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
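# Usage sketch for make_tile_pattern(): a 4x4 checkerboard of 10-bit tiles.
def _demo_make_tile_pattern():
    img = make_tile_pattern(width=480, height=960, h_tile_num=4,
                            v_tile_num=4)
    print(img.shape, img.dtype)  # (960, 480, 3) uint16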
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for checking YCbCr coefficient errors.
    Honestly, the construction is quite messy: roughly made patterns are
    mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is wide (landscape). The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
    rgb2 : array_like
        It's an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second RGB values to this variable.
size : tuple
canvas size.
    block_size : float
        each block's size.
        This value is a ratio to the height of the canvas.
    padding : float
        padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
    # compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto a single image in one loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Make x-axis data on a log10 scale.
    Examples
    --------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Make x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Created with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, offsetting so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # normalize within the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Created with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    For notes on the log2 space, see shaper_func_linear_to_log2().
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
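# Round-trip sketch for the shaper pair above: linear -> log2 -> linear
# reproduces the input (within float precision) for values that are not
# clamped at min_exposure.
def _demo_shaper_round_trip():
    x = np.array([0.18, 1.0, 4.5])
    y = shaper_func_linear_to_log2(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    x2 = shaper_func_log2_to_linear(
        y, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    print(np.allclose(x, x2))  # True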
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8 bit, so this is hand-rolled.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness grows from pt1 toward the lower right.
    Note that pt1 is not the center of the stroke.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline (border) on img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to RGB code values.
    The unit of `luminance` is [cd/m2]. The result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
    The unit of `luminance` is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Get the conversion table used to reorder the radial color patch data
    on the CIELAB a*b* plane (gray at the center), as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The data from _calc_rgb_from_same_lstar_radial_data() is not yet
    reordered and needs the conversion table this function produces.
    This function performs exactly that conversion.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
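# Usage sketch for calc_rad_patch_idx2(): with outmost_num=5 and
# current_num=3 the table maps the 8 patches of the inner ring (ordered
# around the ring) to their raster positions in the 5x5 grid.
def _demo_calc_rad_patch_idx2():
    conv_idx = calc_rad_patch_idx2(outmost_num=5, current_num=3)
    print(conv_idx)  # [13  8  7  6 11 16 17 18]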
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get a list of RGB values for color patches on the CIELAB a*b* plane,
    with gray at the center, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet reordered; apply the conversion table
    obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get a list of RGB values for color patches on the CIELAB a*b* plane,
    with gray at the center, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **reordered** so that the first entry
    is the green patch at the upper left of the image and the last entry
    is the purple patch at the lower right. To plot the patches, take the
    values from the head of the list and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
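# Usage sketch: generate the reordered 9x9 radial patch list; each entry is
# a gamma-encoded RGB triplet, laid out from the upper left.
def _demo_radial_patch_data():
    rgb_list = calc_same_lstar_radial_color_patch_data(
        lstar=58, chroma=32.5, outmost_num=9)
    print(rgb_list.shape)  # (81, 3)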
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0→1→0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0→1→0.
    The acceleration is double that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0→1→0.
    The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0→1→0.
    The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some of the source data is unnecessary here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 color checker image.
    The height is computed from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Convenience wrapper: computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
_calc_rgb_from_same_lstar_radial_data | Get a list of RGB values for color patches on the CIELAB a*b* plane,
with gray at the center, as in:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet reordered; apply the conversion table
obtained from calc_rad_patch_idx2(). | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
    if over_disp:
        # the original call was missing its size arguments; assume the image size
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    Fractional remainders are spread out using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding error leaves the final element short
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
    Apply the matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
    Get the xy values of the horseshoe-shaped outline (the spectral
    locus) used to plot the chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized appropriately.
    Parameters
    ----------
    xy : array_like
        xy value.
    name : string
        color space name.
    normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : float
        the Y value used when normalize == 'specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video level can be rather low, so normalize/maximize the RGB
    values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # set defaults for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting: sRGB looked too narrow and a little odd, so a wider
    gamut is used instead. The drawback is that the colors become
    slightly pale; improve it when time permits.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe (spectral locus)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To tell whether a point lies inside or outside the horseshoe, split
    the region into triangles (build a Delaunay triangulation). With the
    triangulation, inside/outside can be decided with cross-product tests
    (probably). The triangulation can be plotted with the code below.
    One note: the third argument of ```plt.triplot``` is a list of
    **indices** into the first two arguments that defines the triangles,
    like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative/positive flip)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
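# A minimal usage sketch for get_chromaticity_image(), not in the original
# file; the file name is a placeholder. Uncomment to run:
#
#   xy_img = get_chromaticity_image(samples=512, bg_color=0.9)
#   cv2.imwrite("chromaticity.png",
#               np.uint8(np.round(xy_img[..., ::-1] * 0xFF)))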
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Make a CSF-like pattern by overlapping several offset rectangles.
    The input signal level is limited to 16 bit.
    Parameters
    ----------
    width : numeric.
        width of the pattern image.
    height : numeric.
        height of the pattern image.
    lv1 : numeric
        video level 1. A 16 bit value (the default is a 10 bit level
        scaled by 0x40).
    lv2 : numeric
        video level 2. A 16 bit value (the default is a 10 bit level
        scaled by 0x40).
    stripe_num : numeric
        number of stripes.
    Returns
    -------
    array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
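# A minimal usage sketch, not in the original file: the returned image is
# 16 bit, so shift it down to 8 bit for a quick preview dump.
#
#   csf_img = get_csf_color_image(width=640, height=480, stripe_num=18)
#   cv2.imwrite("csf.png", np.uint8(csf_img[..., ::-1] // 0x100))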
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Make an xyY plot like the ones in SONY's HDR explainer materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build the data for inside/outside judgment of the gamut region
    # --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in modern NumPy
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative/positive flip)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
    rgba = np.dstack((rgb, mask))  # (currently unused)
    # From here, convert back to XYZ once more in order to obtain Y.
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for the conversion to log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at z=0
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
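# A minimal usage sketch, not in the original file: the returned grid has
# shape (1, grid_num ** 3, 3), with R varying fastest, as .cube files expect.
#
#   lut_grid = get_3d_grid_cube_format(grid_num=3)
#   print(lut_grid.shape)  # (1, 27, 3)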
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    # Evaluate a quadratic Bezier curve at parameter t
    # (the `samples` argument is currently unused).
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y  # the original computed x and y but never returned them
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
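# A minimal usage sketch, not in the original file (the control points are
# hypothetical): sample the curve over t in [0, 1] and check the endpoints.
#
#   t = np.linspace(0, 1, 64)
#   bx, by = quadratic_bezier_curve(t, (0.0, 0.0), (0.5, 1.0), (1.0, 0.0))
#   print(bx[0], by[0], bx[-1], by[-1])  # 0.0 0.0 1.0 0.0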
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Make a step (staircase) gradation pattern.
    With the right arguments, a pattern that changes by exactly one code
    value per step can also be produced.
    # Caution
    To get a gradation that changes by exactly one code value per step,
    choose the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete settings.
    # Example
    ```
    grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
                                step_num=257, bit_depth=8,
                                color=(1.0, 1.0, 1.0), direction='h')
    grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
                                 step_num=1025, bit_depth=10,
                                 color=(1.0, 1.0, 1.0), direction='h')
    ```
"""
    max_val = 2 ** bit_depth  # renamed from `max` to avoid shadowing the builtin
    # Set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        width, height = height, width
    if (max_val + 1 != step_num):
        """
        Pattern that does NOT increase by exactly one code value per step.
        The last entry would become 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        The last entry would become 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num arrives +1 too large, so subtract it
        # Double-check, just in case, that each step is one code value.
        # (Running this check for the non-exact pattern above would always
        # raise, so it lives inside this branch only.)
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # First build a single horizontal line of the gradation
    # -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2-D using broadcasting
    # ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16 (left disabled; the output stays float)
    # ------------------------------
    # img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
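# A minimal usage sketch, not in the original file: a 10 bit ramp with
# exactly one code value per step (step_num = 2 ** bit_depth + 1).
#
#   ramp = gen_step_gradation(width=1024, height=64, step_num=1025,
#                             bit_depth=10, direction='h')
#   print(ramp.shape)  # (64, 1024, 3)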
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a, overwriting img_a's data in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
    Parameters
    ----------
    bg_img : array_like(float, 3-channel)
        image data.
    fg_img : array_like(float, 4-channel)
        image data.
    tf_str : strings
        transfer function.
    pos : list(int)
        (pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
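# A minimal usage sketch, not in the original file (sizes and values are
# hypothetical): composite an RGBA foreground over an sRGB background with
# the blending done in linear light.
#
#   bg = np.zeros((1080, 1920, 3))
#   fg = np.ones((200, 200, 4)) * 0.5
#   merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(100, 50))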
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # Build the High/Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Repeat vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
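# A minimal usage sketch, not in the original file: dot_size=4 and repeat=4
# yield a (32, 32, 3) image (dot_size * 2 * repeat pixels per side).
#
#   dots = dot_pattern(dot_size=4, repeat=4)
#   print(dots.shape)  # (32, 32, 3)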
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Make a dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of dot sizes to generate.
        For example, kind_num=3 produces 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of sets of the different dot patterns.
        For example, kind_num=3, whole_repeat=2 lays out two sets of the
        1-dot, 2-dot and 4-dot patterns horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        for the 3840x2160 HDR test pattern only; halves width and height.
        (Documented here but not actually a parameter of this function.)
    Returns
    -------
    array_like
        dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make a CSF-like pattern by overlapping several offset rectangles.
    The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a tiled checkerboard-like pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for spotting wrong YCbCr coefficients.
    Honestly, the construction is quite messy: crudely made patterns are
    fused together.
    Parameters
    ----------
    height : numeric.
        height of the pattern image.
    v_tile_num : numeric
        number of the tiles in the vertical direction.
    Note
    ----
    The pattern comes out landscape, and the following relations hold:
    ```
    h_tile_num = v_tile_num * 2
    width = height * 2
    ```
    Returns
    -------
    array_like
        ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
    rgb2 : array_like
        an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
    size : tuple
        canvas size.
    block_size : float
        each block's size, as a ratio of the canvas height.
    padding : float
        padding around each block, as a ratio of the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
    COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding  # was hardcoded to 0.01; honor the argument
    # Compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto one image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Make x-axis data on a log10 scale.
    Examples
    --------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Make x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
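# A minimal usage sketch, not in the original file: ten samples spanning
# -4 to +4 stops around ref_val=1.0.
#
#   x = get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
#   print(x[0], x[-1])  # 0.0625 16.0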
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Based on ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # Convert to log2 space, referenced so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # Normalize by the [min_exposure, max_exposure] range
    y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Based on ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    For notes on the log2 space, see shaper_func_linear_to_log2().
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
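# A minimal roundtrip sketch, not in the original file: encoding with the
# log2 shaper and decoding again should reproduce the input (away from the
# clipped low end).
#
#   lin = np.array([0.18, 1.0, 4.0])
#   enc = shaper_func_linear_to_log2(lin)
#   print(np.allclose(shaper_func_log2_to_linear(enc), lin))  # True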
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8 bit images,
    so this is a hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The `thickness` extends from pt1 toward the lower right;
    note that the stroke is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
    # check the direction in which the thickness is applied
    # (a vertical line gets horizontal thickness, and vice versa)
    if pt1[0] == pt2[0]:
        thickness_direction = 'h'
    else:
        thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline (border) around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into achromatic RGB code values.
    The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into a code value.
    The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
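# A minimal usage sketch, not in the original file: print the raster-order
# index table for the 3x3 ring inside a 5x5 patch grid.
#
#   print(calc_rad_patch_idx2(outmost_num=5, current_num=3))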
# MASKED: _calc_rgb_from_same_lstar_radial_data function (lines 1337-1357)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
    # NOTE: the original unconditionally overwrote `transfer_function` with
    # tf.GAMMA24 here, ignoring the argument; the override has been removed.
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Make x values whose spacing accelerates and then decelerates
    (0 -> 1 -> 0) instead of being uniform.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Make x values whose spacing accelerates and then decelerates
    (0 -> 1 -> 0) instead of being uniform.
    The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Make x values whose spacing accelerates and then decelerates
    (0 -> 1 -> 0) instead of being uniform.
    The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Make x values whose spacing accelerates and then decelerates
    (0 -> 1 -> 0) instead of being uniform.
    The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
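# A minimal comparison sketch, not in the original file: the stronger the
# acceleration, the closer the early samples stay to 0.
#
#   for f in (get_accelerated_x_1x, get_accelerated_x_2x,
#             get_accelerated_x_4x, get_accelerated_x_8x):
#       print(f.__name__, f(8)[1])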
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 ColorChecker image.
    The height is computed automatically from the width; padding_rate
    changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
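# A minimal end-to-end sketch, not in the original file: linear BT.709
# ColorChecker values, gamma 2.4 encoded and laid out as a 6x4 image.
#
#   cc_linear = generate_color_checker_rgb_value()
#   cc_rgb = tf.oetf(np.clip(cc_linear, 0.0, 1.0), tf.GAMMA24)
#   cc_img = make_color_checker_image(cc_rgb, width=1920)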
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple (int)
        (st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Wrapped as a function because computing the arguments of
    `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get the list of RGB values for color patches with gray at the center and
    CIELAB a*b*-plane colors around it, as shown at
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet sorted; it must be remapped with the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0) | 1,337 | 1,357 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
    if over_disp:
        # The original call was `cv2.resizeWindow('preview', )`, which lacks
        # the required width/height; 1920x1080 is an assumed placeholder.
        cv2.resizeWindow('preview', 1920, 1080)
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    Remainders are spread out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the remainders using error diffusion
    # -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error leaves the last element short by 1
    # -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final sanity check
    # -------------------------------------------
    if length != sum(ret_array):
        raise ValueError("the output of equal_devision() is abnormal.")
return ret_array
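# A minimal usage sketch, not in the original file: the pieces always sum
# back to the input length (the exact split depends on the error diffusion).
#
#   parts = equal_devision(100, 7)
#   print(sum(parts))  # 100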
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
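# A minimal usage sketch, not in the original file: applying the identity
# matrix leaves the image unchanged.
#
#   eye = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
#   test_img = np.random.rand(4, 4, 3)
#   print(np.allclose(do_matrix(test_img, eye), test_img))  # True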
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe-shaped outline used to plot
    the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
    # ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the spectral locus (horseshoe)
    # --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    Normalization is also applied in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
    normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : numeric
        the Y value used when normalize == 'specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if (np.sum(rgb > 1.0) > 0):
            print("warning: overflow has occurred at xy_to_rgb")
        if (np.sum(rgb < 0.0) > 0):
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
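# A minimal usage sketch, not in the original file: the D65 white point
# normalizes to (approximately) full white in BT.2020.
#
#   white = xy_to_rgb(np.array([[[0.3127, 0.3290]]]), name='ITU-R BT.2020')
#   print(white)  # ~[[[1. 1. 1.]]]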
def get_white_point(name):
"""
    Get the white point, CIE 1931 based.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# #     # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# #     # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the spectral locus (horseshoe)
    # --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in modern NumPy
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative/positive flip)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Make a CSF-like pattern by overlapping several offset rectangles.
    The input signal level is limited to 16 bit.
    Parameters
    ----------
    width : numeric.
        width of the pattern image.
    height : numeric.
        height of the pattern image.
    lv1 : numeric
        video level 1. A 16 bit value (the default is a 10 bit level
        scaled by 0x40).
    lv2 : numeric
        video level 2. A 16 bit value (the default is a 10 bit level
        scaled by 0x40).
    stripe_num : numeric
        number of stripes.
    Returns
    -------
    array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Make an xyY plot like the ones in SONY's HDR explainer materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build the data for inside/outside judgment of the gamut region
    # --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in modern NumPy
    # Anti-alias to smooth the alpha channel
    # ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative/positive flip)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
    # ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
    rgba = np.dstack((rgb, mask))  # (currently unused)
    # From here, convert back to XYZ once more in order to obtain Y.
    # ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for the conversion to log scale
    # --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at z=0
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    # Evaluate a quadratic Bezier curve at parameter t
    # (the `samples` argument is currently unused).
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y  # the original computed x and y but never returned them
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Make a step (staircase) gradation pattern.
    With the right arguments, a pattern that changes by exactly one code
    value per step can also be produced.
    # Caution
    To get a gradation that changes by exactly one code value per step,
    choose the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete settings.
    # Example
    ```
    grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
                                step_num=257, bit_depth=8,
                                color=(1.0, 1.0, 1.0), direction='h')
    grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
                                 step_num=1025, bit_depth=10,
                                 color=(1.0, 1.0, 1.0), direction='h')
    ```
"""
    max_val = 2 ** bit_depth  # renamed from `max` to avoid shadowing the builtin
    # Set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        width, height = height, width
    if (max_val + 1 != step_num):
        """
        Pattern that does NOT increase by exactly one code value per step.
        The last entry would become 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        The last entry would become 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num arrives +1 too large, so subtract it
        # Double-check, just in case, that each step is one code value.
        # (Running this check for the non-exact pattern above would always
        # raise, so it lives inside this branch only.)
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # First build a single horizontal line of the gradation
    # -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2-D using broadcasting
    # ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16 (left disabled; the output stays float)
    # ------------------------------
    # img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a, overwriting img_a's data in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
    Parameters
    ----------
    bg_img : array_like(float, 3-channel)
        image data.
    fg_img : array_like(float, 4-channel)
        image data.
    tf_str : strings
        transfer function.
    pos : list(int)
        (pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # Build the High/Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Repeat vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Make a dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of dot sizes to generate.
        For example, kind_num=3 produces 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of sets of the different dot patterns.
        For example, kind_num=3, whole_repeat=2 lays out two sets of the
        1-dot, 2-dot and 4-dot patterns horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        for the 3840x2160 HDR test pattern only; halves width and height.
        (Documented here but not actually a parameter of this function.)
    Returns
    -------
    array_like
        dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make a CSF-like pattern by overlapping several offset rectangles.
    The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a tiled checkerboard-like pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for spotting wrong YCbCr coefficients.
    Honestly, the construction is quite messy: crudely made patterns are
    fused together.
    Parameters
    ----------
    height : numeric.
        height of the pattern image.
    v_tile_num : numeric
        number of the tiles in the vertical direction.
    Note
    ----
    The pattern comes out landscape, and the following relations hold:
    ```
    h_tile_num = v_tile_num * 2
    width = height * 2
    ```
    Returns
    -------
    array_like
        ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
    rgb2 : array_like
        an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
    size : tuple
        canvas size.
    block_size : float
        each block's size, as a ratio of the canvas height.
    padding : float
        padding around each block, as a ratio of the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
    COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding  # was hardcoded to 0.01; honor the argument
    # Compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto one image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の2倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# 今回の処理では必要ないデータもあるので xyY と whitepoint だけ抽出
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint # ColorCheckerのオリジナルデータの白色点
illuminant_RGB = rgb_white_point # XYZ to RGB 変換後の白色点を設定
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
6x4 の カラーチェッカーの画像を作る。
Height は Width から自動計算される。padding_rate で少し値が変わる。
"""
h_patch_num = 6
v_patch_num = 4
# 各種パラメータ計算
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# パッチを並べる
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate start postion for centering.
Parameters
----------
bg_size : touple(int)
(width, height) of the background image.
fg_size : touple(int)
(width, height) of the foreground image.
Returns
-------
touple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
`calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
calc_same_lstar_radial_color_patch_data | 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of test-pattern generation tools for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, enables projection='3d' on older Matplotlib
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
    if over_disp:
        # The intended window size was never specified in the source
        # (cv2.resizeWindow() needs a width and a height), so the
        # incomplete call is left disabled here.
        # cv2.resizeWindow('preview', )
        pass
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` parts.
    Fractional remainders are distributed nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the fractional remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding error keeps the last element from being incremented
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
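# Minimal usage sketch for equal_devision() (illustrative, values traced
# by hand): the parts always sum back to `length`.
# >>> equal_devision(10, 3)
# [3, 3, 4]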
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix `mtx` to each RGB pixel of `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
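# Minimal usage sketch for do_matrix() (illustrative): applying the
# identity matrix leaves the image unchanged.
# >>> img = np.ones((2, 2, 3)) * 0.5
# >>> np.allclose(do_matrix(img, np.identity(3)), img)
# True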
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe-shaped outline
    used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # basic parameters
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # compute the xy values of the horseshoe outline
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be quite low, so normalize and
    maximize the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
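# Minimal usage sketch for xy_to_rgb() (illustrative). NOTE:
# xy_to_rgb() calls get_xyz_to_rgb_matrix(), which is neither defined
# nor imported in this module, so that helper must be provided
# elsewhere before the function can run.
# >>> xy_to_rgb(np.array([0.3127, 0.3290]))   # D65 white
# array([ 1.,  1.,  1.])   # equal channels after maximum normalization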
def get_white_point(name):
"""
    Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
#     # default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
#     # prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was too narrow and looked slightly off, so
    BT.2020 was chosen (ACEScg is what is actually enabled below).
    A drawback is that the colors become slightly pale; to be improved
    when time permits.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe outline
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To decide whether a point lies inside or outside the horseshoe,
    split the region into triangles (build a Delaunay triangulation).
    With the triangulation, inside/outside can be judged via simplex
    lookup. The triangulation can be plotted with the code below.
    One note: the third argument of ```plt.triplot``` is a list of
    **indices** into the first and second arguments that form the
    triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` tells which simplex contains
    each xy point. A return value of ```-1``` means the point lies
    outside, so a mask can be built from the negative entries.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in recent NumPy
    # smooth the alpha channel with a little antialiasing
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be quite low, so normalize and
    maximize the RGB values per pixel.
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping several shifted rectangles.
    The input signal levels are limited to 16 bit.
    Parameters
    ----------
    width : numeric.
        width of the pattern image.
    height : numeric.
        height of the pattern image.
    lv1 : numeric
        video level 1, as a 16-bit value (a 10-bit value scaled by 0x40).
    lv2 : numeric
        video level 2, as a 16-bit value (a 10-bit value scaled by 0x40).
    stripe_num : numeric
        number of the stripes.
    Returns
    -------
    array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the one in Sony's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build the data used to classify the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in recent NumPy
    # smooth the alpha channel with a little antialiasing
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be quite low, so normalize and
    maximize the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare for the log-scale conversion
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Return an array of grid points such as
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
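# Minimal usage sketch for get_3d_grid_cube_format() (illustrative):
# the output shape is (1, grid_num ** 3, 3), with R varying fastest.
# >>> get_3d_grid_cube_format(grid_num=2)[0, :4]
# array([[ 0.,  0.,  0.],
#        [ 1.,  0.,  0.],
#        [ 0.,  1.,  0.],
#        [ 1.,  1.,  0.]])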
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    """
    Evaluate a quadratic Bezier curve at the parameter values t.
    """
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    # return the sampled coordinates so the curve is usable by callers
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a step gradation pattern.
    By adjusting the arguments, a pattern that changes by exactly one
    code value per step can also be created.
    # Notes
    To create a gradation that changes by exactly one code value per
    step, specify the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
        A pattern that does not increase one code value per step.
        The last value would land on 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
        A pattern that changes by exactly one code value per step.
        The last value would land on 256 or 1024, so it is dropped.
        """
        val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # step_num is passed in one too large; subtract it
    # sanity check: only meaningful for the one-code-value-per-step case
    # ---------------------------
    if max == step_num:
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # first build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to 2-D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
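# Minimal usage sketch for gen_step_gradation() (illustrative): a
# 10-bit ramp that increases by exactly one code value per step.
# >>> grad = gen_step_gradation(width=1024, height=64, step_num=1025,
# ...                           bit_depth=10, color=(1.0, 1.0, 1.0),
# ...                           direction='h')
# >>> grad.shape
# (64, 1024, 3)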
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
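# Minimal usage sketch for merge() (illustrative): paste a foreground
# image into the center of a background, in place.
# >>> bg = np.zeros((1080, 1920, 3))
# >>> fg = np.ones((480, 640, 3))
# >>> merge(bg, fg, pos=calc_st_pos_for_centering((1920, 1080), (640, 480)))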
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img onto bg_img (bg_img is modified in place).
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # build the high/low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # repeat vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
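# Minimal usage sketch for dot_pattern() (illustrative): the image is
# square, with (dot_size * 2 * repeat) pixels on each side.
# >>> img = dot_pattern(dot_size=4, repeat=4)
# >>> img.shape
# (32, 32, 3)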
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a composite dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of different dot sizes to generate.
        For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of repetitions of the full set of dot patterns.
        For example, kind_num=3 and whole_repeat=2 lay out the 1-dot,
        2-dot and 4-dot patterns twice, horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        for the 3840x2160 HDR test pattern only: halves the width and
        height (documented here but not accepted by the current signature).
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Create a CSF-like pattern by overlapping several shifted rectangles.
    The input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a CSF pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a tiled checkerboard pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
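# Minimal usage sketch for make_tile_pattern() (illustrative): the
# output keeps the uint16 dtype of the level arrays.
# >>> tile = make_tile_pattern(width=480, height=960,
# ...                          h_tile_num=4, v_tile_num=4)
# >>> tile.shape, tile.dtype
# ((960, 480, 3), dtype('uint16'))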
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for checking YCbCr coefficient errors.
    Honestly, it is assembled rather crudely: a rough mash-up of
    hastily made patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is landscape-shaped, and the following holds:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker image.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values here.
size : tuple
canvas size.
block_size : float
        size of each block, as a ratio to the canvas height.
    padding : float
        padding around each block, as a ratio to the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding  # padding ratio (0.01 by default)
    # derive the basic layout parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto a single image in one loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
    Examples
    --------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Lin_to_Log2_param.ctl:
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, shifted so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Log2_to_Lin_param.ctl:
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
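# Minimal round-trip sketch for the shaper pair (illustrative): the
# log2 shaper and its inverse cancel out for in-range values.
# >>> x = np.array([0.18, 1.0])
# >>> y = shaper_func_linear_to_log2(x)
# >>> np.allclose(shaper_func_log2_to_linear(y), x)
# True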
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8 bit, hence
    this hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 toward the lower right.
    Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline frame around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to an RGB code value triplet.
    The luminance unit is [cd/m2]; the result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
    The luminance unit is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Compute the index-conversion table for the radial color patches
    (gray in the center, colors from the CIELAB a*b* plane around it):
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The patch data is produced in an unsorted order, so it has to be
    rearranged with a conversion table; this function computes exactly
    that table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get the list of RGB values for the radial color patches
    (gray in the center, colors from the CIELAB a*b* plane around it):
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet sorted; it must be rearranged with
    the conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
        lstar=58, chroma=32.5, outmost_num=9,
        color_space=BT709_COLOURSPACE,
        transfer_function=tf.GAMMA24):
    """
    Get the list of RGB values for the radial color patches
    (gray in the center, colors from the CIELAB a*b* plane around it):
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry
    is the green patch at the upper left of the image and the last
    entry is the purple patch at the lower right. To plot the patches,
    simply take the values from the head of the list and lay them out
    toward the lower right.
    """
    patch_num = outmost_num ** 2
    transfer_function = tf.GAMMA24  # note: this overrides the argument
    rgb_list = np.ones((patch_num, 3))
    current_num_list = range(1, outmost_num + 1, 2)
    chroma_list = np.linspace(0, chroma, len(current_num_list))
    for temp_chroma, current_num in zip(chroma_list, current_num_list):
        current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
        rgb = _calc_rgb_from_same_lstar_radial_data(
            lstar, temp_chroma, current_num, color_space)
        rgb = np.reshape(rgb, (current_patch_num, 3))
        rgb = tf.oetf(rgb, transfer_function)
        conv_idx = calc_rad_patch_idx2(
            outmost_num=outmost_num, current_num=current_num)
        for idx in range(current_patch_num):
            rgb_list[conv_idx[idx]] = rgb[idx]
    return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values whose rate of change accelerates from 0 to 1 and
    back to 0, instead of increasing uniformly.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values whose rate of change accelerates from 0 to 1 and
    back to 0. The acceleration is twice that of `get_accelerated_x_1x`.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values whose rate of change accelerates from 0 to 1 and
    back to 0. The acceleration is four times that of
    `get_accelerated_x_1x`.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values whose rate of change accelerates from 0 to 1 and
    back to 0. The acceleration is eight times that of
    `get_accelerated_x_1x`.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
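# Minimal usage sketch for the get_accelerated_x_* family
# (illustrative): every variant starts at 0, ends at 1, and is
# monotonically increasing in between.
# >>> x = get_accelerated_x_4x(8)
# >>> float(x[0]), float(x[-1])
# (0.0, 1.0)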
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # extract only xyY and the white point; the rest is not needed here
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
    rgb_white_point = target_white  # the original hard-coded D65_WHITE here
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is computed automatically from the width; padding_rate
    shifts the value slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
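# Minimal usage sketch for make_color_checker_image() (illustrative):
# generate_color_checker_rgb_value() may produce slightly negative
# linear values, so clip before building the image.
# >>> cc_rgb = generate_color_checker_rgb_value()
# >>> cc_img = make_color_checker_image(np.clip(cc_rgb, 0.0, 1.0))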
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Helper around `calc_st_pos_for_centering()`: computing its
    arguments by hand was tedious, hence this function.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
    print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list | 1,360 | 1,392 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
    if over_disp:
        # over_disp: use a resizable window fitted to the image size
        # (assumption; the size arguments were missing here)
        cv2.namedWindow('preview', cv2.WINDOW_NORMAL)
    if order == 'rgb':
        cv2.imshow('preview', img[:, :, ::-1])
    elif order == 'bgr':
        cv2.imshow('preview', img)
    elif order == 'mono':
        cv2.imshow('preview', img)
    else:
        raise ValueError("order parameter is invalid")
    if over_disp:
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Split `length` into `div_num` pieces.
    Any fractional remainder is spread out using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the rounding using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding error keeps the last slot from getting its +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final sanity check
    # -------------------------------------------
    if length != sum(ret_array):
        raise ValueError("the output of equal_devision() is abnormal.")
return ret_array
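# A minimal, hand-checked usage sketch of equal_devision(): the pieces
# always sum back to the original length, and remainders are spread by
# error diffusion rather than all landing on one slot.
def _demo_equal_devision():
    assert equal_devision(10, 3) == [3, 3, 4]
    assert sum(equal_devision(1920, 7)) == 1920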
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
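# Usage sketch for do_matrix(): a matrix whose three rows are all the
# standard BT.709 luma coefficients turns an RGB image into R=G=B
# grayscale. The input image here is arbitrary demo data.
def _demo_do_matrix_grayscale():
    img = np.random.rand(4, 4, 3)
    bt709_y = [0.2126, 0.7152, 0.0722]
    return do_matrix(img, [bt709_y, bt709_y, bt709_y])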
def _get_cmfs_xy():
"""
    Compute the xy values of the spectral-locus (horseshoe) outline
    used when plotting the xy chromaticity diagram.
    Returns
    -------
    array_like
        xy coordinate for chromaticity diagram
    """
    # basic parameters
    # ------------------
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    # compute the xy values of the horseshoe outline
    # --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values,
    normalizing them sensibly along the way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
    normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : float
        the Y value to assign when ``normalize`` is 'specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be quite low, so normalize/maximize
    the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
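# Usage sketch for xy_to_rgb(): feeding the D65 white point back in should
# give (nearly) equal R, G and B after per-pixel maximum normalization.
# This is only a plausibility check, not a test from the original module.
def _demo_xy_to_rgb_white():
    rgb = xy_to_rgb(np.array(D65_WHITE), name='ITU-R BT.2020')
    return rgb  # expected to be close to [1.0, 1.0, 1.0]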
def get_white_point(name):
"""
    Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was too narrow and looked a little odd, so a wider
    gamut is used (currently ACEScg; see below). The slightly washed-out
    colors are a drawback; to be improved when time allows.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe outline
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To tell inside from outside of the horseshoe, the region is split into
    triangles (a Delaunay triangulation). Once the triangulation exists,
    the in/out test is straightforward (probably via cross products). The
    triangulation can be plotted with the code below. One note: the third
    argument of ```plt.triplot``` is a list of **indices** into the first
    two arguments that form the triangles, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` reports which simplex each xy point
    falls into. It returns ```-1``` for points outside the region, so a
    "less than zero" test yields the in/out mask.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in NumPy >= 1.24
    # anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be quite low, so normalize/maximize
    the RGB values per pixel.
    """
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
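# Usage sketch: render the horseshoe image and save it with OpenCV.
# cv2.imwrite() expects BGR channel order and integer data, hence the
# channel flip and the 8bit scaling (same convention as elsewhere here).
def _demo_save_chromaticity_image(fname="./chromaticity_diagram.png"):
    rgb = get_chromaticity_image(samples=512)
    cv2.imwrite(fname, np.uint8(np.round(rgb[..., ::-1] * 0xFF)))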
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Make something like a CSF pattern by overlapping several offset
    rectangles. Input signal levels are restricted to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16bit.
    lv2 : numeric
        video level 2. this value must be 16bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Make an xyY figure like the ones in SONY's HDR explanatory material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build the data for in/out tests of the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float is removed in NumPy >= 1.24
    # anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive)
    # --------------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw video levels can be quite low, so normalize/maximize
    the RGB values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # convert back to XYZ once more, to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare for the log-scale conversion
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Return an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
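# Shape sketch for get_3d_grid_cube_format(): np.dstack() yields shape
# (1, grid_num ** 3, 3), and flattening in C order makes R vary fastest,
# which matches the .cube file convention.
def _demo_3d_grid_shape():
    lut = get_3d_grid_cube_format(grid_num=3)
    assert lut.shape == (1, 27, 3)
    return lut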
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    # evaluate the quadratic Bezier curve at parameter t
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
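# Usage sketch for quadratic_bezier_curve() with the return value above;
# the control points are arbitrary illustrative values.
def _demo_quadratic_bezier():
    t = np.linspace(0, 1, 16)
    return quadratic_bezier_curve(
        t, p0=(0.0, 0.0), p1=(0.5, 1.0), p2=(1.0, 0.0))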
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Make a staircase gradation pattern.
    With suitable arguments it can also step exactly one code value at a time.
    # Caveat
    To get a gradation that changes exactly one code value per step, set
    ```step_num = (2 ** bit_depth) + 1```
    as the parameters. See the Example below for concrete values.
    # Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth  # renamed from ``max`` to avoid shadowing the builtin
    # gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        width, height = height, width
    if (max_val + 1 != step_num):
        """
        Not the one-code-value-per-step pattern.
        The last entry would be 256 or 1024, so subtract 1 from it.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        The exact one-code-value-per-step pattern.
        The last entry would be 256 or 1024, so drop it.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num arrives with an extra +1, so undo it here
        # check, just in case, that each step is exactly one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")
    # first, build a one-pixel-high horizontal gradation
    # -----------------------------------
    step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
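# Usage sketch: a horizontal 10bit ramp that steps exactly one code value
# per step, using step_num = (2 ** bit_depth) + 1 as the docstring advises.
def _demo_step_gradation():
    return gen_step_gradation(width=1024, height=128, step_num=1025,
                              bit_depth=10, color=(1.0, 1.0, 1.0),
                              direction='h')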
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    img_a is overwritten in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : str
        transfer function name
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally / vertically
    pixel_num = dot_size * 2 * repeat
    # build the High-Low logical arrays
    even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
    even_logic = np.dstack((even_logic, even_logic, even_logic))
    odd_logic = np.logical_not(even_logic)
    # apply the color
    color = color.reshape((1, 1, 3))
    even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
    odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # repeat vertically and join the even/odd lines
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Make a composite dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of dot sizes to generate.
        e.g. kind_num=3 produces 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of repetitions of the whole set of patterns.
        e.g. kind_num=3, whole_repeat=2 lays out the 1-dot, 2-dot and
        4-dot patterns twice horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make something like a CSF pattern by overlapping several offset
    rectangles. Input signal levels are restricted to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a checkerboard-style tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for checking YCbCr coefficient errors.
    Frankly, this is assembled rather crudely from hastily made patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is landscape. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
    rgb2 : array_like
        This is an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
size : tuple
canvas size.
    block_size : float
        each block's size, as a ratio to the height of the canvas.
    padding : float
        the padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
    COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding  # honor the argument instead of hard-coding 0.01
    # compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches into a single image
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Make log10-scale x-axis data.
    Examples
    --------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Make log2-scale x-axis data.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, offset so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
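# Round-trip sketch: the two shaper functions are inverses on the
# non-clipped range, so linear -> log2 -> linear is the identity there.
def _demo_shaper_round_trip():
    x = np.array([0.01, 0.18, 1.0, 10.0])
    y = shaper_func_linear_to_log2(x)
    assert np.allclose(shaper_func_log2_to_linear(y), x)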
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV only seems to support 8 bit data, hence this hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter takes effect toward the lower right of pt1.
    Note that pt1 is not the center of the stroke.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert a luminance into achromatic RGB code values.
    The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert a luminance into a code value.
    The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Build the index-conversion table for the radial color-patch layout
    (gray at the center, CIELAB a*b*-plane colors around it):
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The raw patch data is not yet in display order, so a conversion
    table is needed to reorder it; this function computes exactly
    that table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
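# Hand-worked sketch of calc_rad_patch_idx2(): for a 3x3 block the eight
# ring patches (the center is excluded) are visited starting right of
# center, across the top from right to left, down the left side, then
# across the bottom from left to right.
def _demo_rad_patch_idx():
    idx = calc_rad_patch_idx2(outmost_num=3, current_num=3)
    assert list(idx) == [5, 2, 1, 0, 3, 6, 7, 8]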
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Compute the RGB values of the color patches whose center is gray and
    whose surroundings lie on the CIELAB a*b* plane:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet sorted; reorder it with the conversion
    table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Compute the list of RGB values for the color patches whose center is
    gray and whose surroundings lie on the CIELAB a*b* plane:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry
    is the green patch at the upper left of the image and the last entry
    is the purple patch at the lower right. To plot the patches, take the
    values from the head of the list and lay them out toward the lower
    right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Make x values whose acceleration goes 0 → 1 → 0, rather than increasing at a constant rate.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Make x values whose acceleration goes 0 → 1 → 0, rather than increasing
    at a constant rate. The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Make x values whose acceleration goes 0 → 1 → 0, rather than increasing
    at a constant rate. The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Make x values whose acceleration goes 0 → 1 → 0, rather than increasing
    at a constant rate. The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
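# The 1x/2x/4x/8x variants above differ only in how many times the radian
# axis is re-warped with sin() before the final ease curve. The helper
# below is a generalization added here as a sketch (not one of the
# original functions): warp_num = 0/1/2/3 reproduces 1x/2x/4x/8x.
def get_accelerated_x_nx(sample_num=64, warp_num=0):
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(warp_num):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2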
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some fields are not needed here, so extract only xyY and whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint       # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 ColorChecker image.
    The height is computed automatically from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
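# End-to-end sketch: linear ColorChecker values -> OETF -> patch image.
# The output of generate_color_checker_rgb_value() is linear, so tf.oetf()
# is applied here, the same way it is used elsewhere in this file.
def _demo_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb, width=1920, padding_rate=0.01)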
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
        (st_pos_h, st_pos_v)
    Examples
    --------
    >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Convenience helper: returns (width, height) of ``img``, the arguments
    needed by `calc_st_pos_for_centering()`.
"""
return (img.shape[1], img.shape[0])
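# Usage sketch combining the two helpers above: center a foreground image
# on a background image (both plain float ndarrays).
def _demo_centering(bg_img, fg_img):
    st_pos = calc_st_pos_for_centering(bg_size=get_size_from_image(bg_img),
                                       fg_size=get_size_from_image(fg_img))
    merge(bg_img, fg_img, pos=st_pos)
    return bg_img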
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
get_accelerated_x_1x | 単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Make a step gradation pattern.
    With suitable arguments it can also produce a pattern that changes
    by exactly one code value per step.
    # Note
    To get a gradation that changes by exactly one code value per step,
    set the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth
    # set the gradation direction
    # ----------------------
    if direction != 'h':
        width, height = height, width
    if (max_val + 1 != step_num):
        """
        Pattern that does not change by one code value per step.
        Subtract 1 so the final entry does not become 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        Drop the final entry, which would otherwise be 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num comes in one larger than needed, so subtract
        # double-check that the values really change one step at a time
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # first build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # extend to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
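    # Porter-Duff "over" compositing in linear light: out = (1 - a) * bg + fg.
    # Note fg_linear's RGB is used as-is below, i.e. it is assumed to be
    # premultiplied by alpha.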
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
    # build the High-Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
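# For illustration: with the defaults (dot_size=4, repeat=4) the result is a
# 32x32 checkerboard of 4-pixel dots, since pixel_num = dot_size * 2 * repeat.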
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Make a composite dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of dot sizes to generate.
        e.g. kind_num=3 produces 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of sets of the different dot patterns.
        e.g. kind_num=3, whole_repeat=2 lays out the 1-dot, 2-dot and
        4-dot patterns twice horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        meant only for the 3840x2160 HDR test pattern; halves the width
        and height (not present in the current signature).
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make a CSF-like pattern by stacking several offset rectangles.
    Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a tiled checkerboard pattern
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for checking YCbCr coefficient errors.
    Frankly it is assembled quite crudely, by mashing together roughly made patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern comes out wide. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        An optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values here.
size : tuple
canvas size.
block_size : float
        The size of each block,
        as a ratio of the canvas height.
padding : float
        padding around each block, as a ratio of the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw all 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Make log10-scale x-axis data.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Make log2-scale x-axis data.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Lin_to_Log2_param.ctl
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert into log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Log2_to_Lin_param.ctl
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
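# Round-trip note: shaper_func_log2_to_linear(shaper_func_linear_to_log2(x))
# recovers x for x in [mid_gray * 2**min_exposure, mid_gray * 2**max_exposure];
# values below that range are clipped to 0 by the forward shaper.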
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to handle only 8 bit, hence this hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 towards the lower right.
    Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline around the border of img
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into RGB code values.
    luminance is in [cd/m2]; the result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into a code value.
    luminance is in [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Relates to color patches with gray in the center and CIELAB
    a*b*-plane colors around it, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The patch data is produced unsorted and has to be rearranged with a
    conversion table; this function computes exactly that table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
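    # The ring indices are collected in patch order: start at the middle of
    # the right edge, go up that edge, across the top row right-to-left,
    # down the left edge, along the bottom row left-to-right, then back up
    # the lower half of the right edge.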
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get a list of RGB values for color patches with gray in the center
    and CIELAB a*b*-plane colors around it, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The data is not yet sorted; it must be rearranged using the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get a list of RGB values for color patches with gray in the center
    and CIELAB a*b*-plane colors around it, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry
    is the green patch at the upper left of the image and the last entry
    is the purple patch at the lower right. To plot the patches, take the
    values from the head of the list and lay them out towards the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
# MASKED: get_accelerated_x_1x function (lines 1416-1442)
def get_accelerated_x_2x(sample_num=64):
"""
    Make x values whose acceleration goes 0→1→0 instead of increasing monotonically.
    The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Make x values whose acceleration goes 0→1→0 instead of increasing monotonically.
    The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Make x values whose acceleration goes 0→1→0 instead of increasing monotonically.
    The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
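# Note on the family above: each additional sin() re-warp steepens the
# 0→1→0 acceleration profile (the 2x / 4x / 8x naming), clustering the
# samples more tightly near both ends of [0, 1].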
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ to RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 ColorChecker image.
    Height is computed automatically from Width; padding_rate shifts it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
(width, height) of the background image.
    fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
    tuple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Helper, because computing the arguments for `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def get_accelerated_x_1x(sample_num=64):
"""
    Make x values whose acceleration goes 0→1→0 instead of increasing monotonically
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x | 1,416 | 1,442 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating evaluation test patterns
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide length into div_num pieces.
    Any fractional remainder is spread out nicely via error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # reconcile the fractional remainders using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # handle the case where rounding error keeps the last element from getting its +1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
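# Worked example: equal_devision(10, 3) returns [3, 3, 4]; the fractional
# remainders accumulate step by step and the final correction hands the last
# slot the extra pixel, so the parts always sum back to `length`.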
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
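# Equivalent formulation (for reference): each output channel is a dot
# product of (r, g, b) with a matrix row, so for an (..., 3)-shaped float
# image this matches `img @ np.array(mtx).T`.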
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe outline used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # set the basic parameters
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # compute the xy values of the horseshoe
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values,
    normalizing them sensibly as well.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be quite low, so normalize and maximize
    the RGB values for each pixel, when requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Get the white point, CIE 1931 based.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# #     # set the keyword-argument defaults
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# #     # prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate an image of the horseshoe of the xy chromaticity diagram
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was narrow and looked a little off, so a wider
    gamut is used (see the color_space assignment below); the colors come
    out slightly pale as a result. Would like to improve this given time.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # compute the xy values of the horseshoe
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To judge whether a point lies inside or outside the horseshoe, split
    the region into triangles (build a Delaunay triangulation). With the
    triangulation in hand, inside/outside can be determined via cross
    products (probably). The resulting triangulation can be plotted with
    the code below. One note: the third argument of ```plt.triplot``` is
    a list of **indices** into the first two arguments that form the
    triangles, i.e. something like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` tells which simplex (by index)
    contains each xy point. A return value of ```-1``` means the point is
    outside every simplex, so a mask can be built from the sub-zero values.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # anti-alias so the alpha channel becomes smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be quite low, so normalize
    and maximize the RGB values for each pixel.
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Make a CSF-like pattern by stacking several offset rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Make an xyY plot like the ones in SONY's HDR explainer materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # build the data used to judge whether points are inside the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # anti-alias so the alpha channel becomes smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be quite low, so normalize
    and maximize the RGB values for each pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # from here, convert back to XYZ in order to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # prepare the conversion to log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # get the chromaticity_image and paste it at z=0
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array such as
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
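# Usage sketch (values are illustrative): a .cube 3DLUT commonly uses a
# 33-point grid, so get_3d_grid_cube_format(grid_num=33) yields the 33**3
# sampling points with R varying fastest, in shape (1, grid_num**3, 3).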
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    # quadratic Bezier in Bernstein form:
    # B(t) = (1 - t)^2 * p0 + 2 * (1 - t) * t * p1 + t^2 * p2
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Make a step gradation pattern.
    With suitable arguments it can also produce a pattern that changes
    by exactly one code value per step.
    # Note
    To get a gradation that changes by exactly one code value per step,
    set the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth
    # set the gradation direction
    # ----------------------
    if direction != 'h':
        width, height = height, width
    if (max_val + 1 != step_num):
        """
        Pattern that does not change by one code value per step.
        Subtract 1 so the final entry does not become 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        Pattern that changes by exactly one code value per step.
        Drop the final entry, which would otherwise be 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num comes in one larger than needed, so subtract
        # double-check that the values really change one step at a time
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # first build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # extend to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
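    # Porter-Duff "over" compositing in linear light: out = (1 - a) * bg + fg.
    # Note fg_linear's RGB is used as-is below, i.e. it is assumed to be
    # premultiplied by alpha.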
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Make a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
    # build the High-Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Make a composite dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of dot sizes to generate.
        e.g. kind_num=3 produces 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of sets of the different dot patterns.
        e.g. kind_num=3, whole_repeat=2 lays out the 1-dot, 2-dot and
        4-dot patterns twice horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        meant only for the 3840x2160 HDR test pattern; halves the width
        and height (not present in the current signature).
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make a CSF-like pattern by stacking several offset rectangles.
    Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a tiled checkerboard pattern
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for checking YCbCr coefficient errors.
    Frankly it is assembled quite crudely, by mashing together roughly made patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern comes out wide. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        An optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values here.
size : tuple
canvas size.
block_size : float
        The size of each block,
        as a ratio of the canvas height.
padding : float
        padding around each block, as a ratio of the canvas height.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw all 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Make log10-scale x-axis data.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Make log2-scale x-axis data.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Lin_to_Log2_param.ctl
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert into log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Log2_to_Lin_param.ctl
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
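# Round-trip note: shaper_func_log2_to_linear(shaper_func_linear_to_log2(x))
# recovers x for x in [mid_gray * 2**min_exposure, mid_gray * 2**max_exposure];
# values below that range are clipped to 0 by the forward shaper.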
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to handle only 8 bit, hence this hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 towards the lower right.
    Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline around the border of img
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into RGB code values.
    luminance is in [cd/m2]; the result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into a code value.
    luminance is in [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Relates to color patches with gray in the center and CIELAB
    a*b*-plane colors around it, as in:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The patch data is produced unsorted and has to be rearranged with a
    conversion table; this function computes exactly that table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Compute the list of RGB values for the radial color patches (gray
    at the center, surrounded by patches on the a*b* plane of the
    CIELAB space), such as:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The resulting data is not yet sorted and must be rearranged with
    the conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Compute the list of RGB values for the radial color patches (gray
    at the center, surrounded by patches on the a*b* plane of the
    CIELAB space), such as:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted**: the first entry is the
    green patch at the upper left of the image and the last entry is
    the purple patch at the lower right. To plot the patches, simply
    take the values from the head of the list and lay them out toward
    the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
    The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
    The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
    The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Some data is not needed for this processing, so extract only the
    # xyY values and the whitepoint.
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is computed automatically from the width;
    padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
        (st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
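# Editor's sketch (not part of the original module): the worked example from
# the docstring above. Centering a 640x480 foreground on a 1920x1080 canvas
# gives (1920 // 2 - 640 // 2, 1080 // 2 - 480 // 2) = (640, 300).
def _centering_demo():
    st_pos = calc_st_pos_for_centering(
        bg_size=(1920, 1080), fg_size=(640, 480))
    assert st_pos == (640, 300)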
def get_size_from_image(img):
"""
    Helper for computing the arguments of `calc_st_pos_for_centering()`,
    which was tedious to do by hand.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
get_accelerated_x_2x | Create an x whose increments accelerate and then decelerate (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of test-pattern generation tools for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])  # size args were missing
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Split length into div_num pieces.
    Any fractional remainder is spread out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the fractions using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error leaves the last element one short
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
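# Editor's sketch (not part of the original module): whatever the remainder,
# the pieces produced by equal_devision() always sum back to the original
# length, e.g. equal_devision(10, 3) yields something like [3, 3, 4].
def _equal_devision_demo():
    assert sum(equal_devision(10, 3)) == 10
    assert sum(equal_devision(1920, 7)) == 1920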
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
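# Editor's sketch (not part of the original module): applying the identity
# matrix through do_matrix() must leave the image unchanged.
def _do_matrix_demo():
    img = np.random.rand(4, 4, 3)
    identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    assert np.allclose(do_matrix(img, identity), img)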
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe-shaped outline (the spectral
    locus) used for plotting the chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the spectral locus
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
    normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : float
        the Y value of xyY used when normalize='specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be low, so normalize & maximize the RGB
    values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate an image of the horseshoe region of the xy chromaticity
    diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was narrow and looked a little odd, so BT.2020
    was chosen instead. The downside is that the colors become slightly
    pale. Worth improving when there is time.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the spectral locus
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To tell the inside of the horseshoe from the outside, the region is
    split into triangles (a Delaunay triangulation is built). Once the
    triangulation exists, inside/outside can be decided via cross
    products (probably). The triangulation can be plotted with the code
    below. One note: the third argument of ```plt.triplot``` is a list
    of **indices** forming triangles from the first and second
    arguments, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` tells which triangle index the
    point xy belongs to. A return value of ```-1``` means the point is
    outside, so a region mask can be built from the values below 0.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias so the alpha channel becomes smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be low, so normalize & maximize the RGB
    values per pixel.
"""
    rgb[rgb == 0] = 1.0  # guard against division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping several shifted
    rectangles. Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the ones found in SONY's HDR explanatory
    material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build the data used to classify points inside/outside the gamut
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Anti-alias so the alpha channel becomes smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be low, so normalize & maximize the RGB
    values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # Convert back to XYZ once more, to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for the log-scale conversion
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
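# Editor's sketch (not part of the original module): for grid_num=2 the grid
# holds the 8 corners of the RGB cube, with R varying fastest as in a CUBE
# format 3DLUT.
def _cube_grid_demo():
    grid = get_3d_grid_cube_format(grid_num=2)
    assert grid.shape == (1, 8, 3)
    assert np.allclose(grid[0, 0], [0.0, 0.0, 0.0])
    assert np.allclose(grid[0, 1], [1.0, 0.0, 0.0])
    assert np.allclose(grid[0, 7], [1.0, 1.0, 1.0])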
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    # the computed curve was never returned in the original; return it
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a staircase gradation pattern.
    By tuning the arguments, a pattern that changes by exactly one
    code value per step can also be created.
    # Notes
    To create a gradation that changes by exactly one code value per
    step, set the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
        A pattern that does not increase one code value at a time.
        Subtract 1 because the last entry would be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
        A pattern that changes by exactly one code value per step.
        Drop the last entry because it would be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # the argument step_num is one larger, so subtract 1
        # double-check that each step really changes by one code value
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
    # First, build a single horizontal line of the gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to two dimensions using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
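# Editor's sketch (not part of the original module), lifted from the
# docstring above: a ramp that increases by exactly one 8-bit code value per
# step requires step_num = (2 ** bit_depth) + 1 = 257.
def _step_gradation_demo():
    return gen_step_gradation(width=1024, height=128, step_num=257,
                              bit_depth=8, color=(1.0, 1.0, 1.0),
                              direction='h')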
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
    pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : string
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # generate the High-Low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # colorize
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
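# Editor's sketch (not part of the original module): the pattern is square
# with side dot_size * 2 * repeat, and the top-left dot carries the
# requested color.
def _dot_pattern_demo():
    img = dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 0.0, 0.0]))
    assert img.shape == (32, 32, 3)
    assert np.allclose(img[0, 0], [1.0, 0.0, 0.0])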
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a dot pattern.
    Parameters
    ----------
    kind_num : integer
        number of dot sizes to create.
        e.g. kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
    whole_repeat : integer
        number of groups of the different dot patterns.
        e.g. kind_num=3, whole_repeat=2 creates two groups of the
        1-dot, 2-dot and 4-dot patterns horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Create a CSF-like pattern by overlapping several shifted
    rectangles. Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a tiled checkerboard pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
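# Editor's sketch (not part of the original module): neighbouring tiles
# alternate between high_level and low_level, checkerboard style.
def _tile_pattern_demo():
    img = make_tile_pattern(width=8, height=8, h_tile_num=2, v_tile_num=2,
                            low_level=(0, 0, 0),
                            high_level=(1023, 1023, 1023))
    assert img.shape == (8, 8, 3)
    assert np.all(img[0, 0] == 1023)  # (h_idx + v_idx) even -> high_level
    assert np.all(img[0, 4] == 0)     # (h_idx + v_idx) odd  -> low_level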
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for checking YCbCr coefficient errors.
    Frankly, it is assembled in a rather messy way: crudely made
    patterns are just mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern comes out wide. The following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        It's an optional parameter.
        If you want to draw two different ColorCheckers,
        set the RGB values to this variable.
size : tuple
canvas size.
block_size : float
        Each block's size.
        This value is a ratio to the height of the canvas.
padding : float
        The padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
    # Compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
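# Editor's sketch (not part of the original module): with ref_val=1.0 the
# samples run from 2 ** min_exposure to 2 ** max_exposure, evenly spaced in
# log2, so the middle sample of a symmetric range is the reference value.
def _log2_x_scale_demo():
    x = get_log2_x_scale(sample_num=9, ref_val=1.0,
                         min_exposure=-4.0, max_exposure=4.0)
    assert np.isclose(x[0], 2.0 ** -4)
    assert np.isclose(x[-1], 2.0 ** 4)
    assert np.isclose(x[4], 1.0)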
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # Convert to log2 space, offset so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # Normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    For notes on the Log2 space, see the description of
    shaper_func_linear_to_log2().
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8-bit images,
    so this is a self-made implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 toward the lower right.
    Note that the line is NOT centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline along the four edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into achromatic RGB code values.
    The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] into a code value.
    The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Build the conversion table used to reorder the radial color patches
    (gray at the center, surrounded by patches on the a*b* plane of the
    CIELAB space), such as:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The patch data is generated unsorted, so it has to be rearranged
    with the conversion table returned by this function.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Compute the list of RGB values for the radial color patches (gray
    at the center, surrounded by patches on the a*b* plane of the
    CIELAB space), such as:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The resulting data is not yet sorted and must be rearranged with
    the conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Compute the list of RGB values for the radial color patches (gray
    at the center, surrounded by patches on the a*b* plane of the
    CIELAB space), such as:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted**: the first entry is the
    green patch at the upper left of the image and the last entry is
    the purple patch at the lower right. To plot the patches, simply
    take the values from the head of the list and lay them out toward
    the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
# MASKED: get_accelerated_x_2x function (lines 1445-1473)
def get_accelerated_x_4x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
    The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
    The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
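# The variants above repeat the same sine warp zero to three times before
# the final mapping. Editor's sketch (hypothetical helper, not part of the
# original module): apply the warp k times for progressively stronger
# ease-in/ease-out; k=0 reproduces get_accelerated_x_1x, k=1 the 2x
# variant, k=2 the 4x variant, and k=3 the 8x variant.
def _get_accelerated_x_kx(sample_num=64, k=0):
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(k):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2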
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Some data is not needed for this processing, so extract only the
    # xyY values and the whitepoint.
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is computed automatically from the width;
    padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
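# Editor's sketch (not part of the original module): feed the linear values
# from generate_color_checker_rgb_value() through an OETF with tf.oetf() (as
# done elsewhere in this module), then lay them out. The clip guards against
# the slightly negative cyan patch shown in the docstring example above.
def _color_checker_image_demo():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb, width=1920, padding_rate=0.01)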
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
        (st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Helper for computing the arguments of `calc_st_pos_for_centering()`,
    which was tedious to do by hand.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def get_accelerated_x_2x(sample_num=64):
"""
    Create an x whose increments accelerate and then decelerate
    (acceleration 0 -> 1 -> 0), instead of a uniformly spaced ramp.
    The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x | 1,445 | 1,473 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of test-pattern generation tools for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
        cv2.resizeWindow('preview', img.shape[1], img.shape[0])  # size args were missing
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Split length into div_num pieces.
    Any fractional remainder is spread out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the fractions using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error leaves the last element one short
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
    Apply the 3x3 matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe-shaped outline (the spectral
    locus) used for plotting the chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the spectral locus
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
    normalize : string
        normalize method. You can select 'maximum', 'specific' or None.
    specific : float
        the Y value of xyY used when normalize='specific'.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    As-is the video levels can be low, so normalize & maximize the RGB
    values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: overflow has occurred at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the horseshoe
    # --------------------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Antialias to smooth the alpha channel
    # -------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative to positive)
    # -----------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Restore colors from the xy mesh
    # -------------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
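# Usage sketch (added for illustration): render the horseshoe image and save
# it with OpenCV. get_chromaticity_image() returns float RGB in [0, 1] with
# gamma 1/2.2 already applied, so only 8-bit quantization and the RGB->BGR
# channel swap are needed before writing.
def _example_save_chromaticity_image():
    rgb = get_chromaticity_image(samples=512)
    img_8bit = np.uint8(np.round(rgb * 0xFF))
    cv2.imwrite("chromaticity_sketch.png", img_8bit[..., ::-1])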
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping multiple shifted rectangles.
    The input signal level is limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build the data for judging whether points fall inside the gamut
    # ----------------------------------------------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Antialias to smooth the alpha channel
    # -------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative to positive)
    # -----------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Restore colors from the xy mesh
    # -------------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # From here, convert back to XYZ in order to obtain Y.
    # -----------------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for conversion to a log scale
    # --------------------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at the z=0 plane
    # ---------------------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when creating a 3DLUT in the CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
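# Shape sketch (added for illustration): the grid comes back as
# (1, grid_num ** 3, 3) with R varying fastest, then G, then B, which is the
# sample order used by the .cube 3DLUT format.
def _example_3dlut_grid_shape():
    grid = get_3d_grid_cube_format(grid_num=17)
    assert grid.shape == (1, 17 ** 3, 3)
    return grid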
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a gradation pattern that changes in steps.
    By adjusting the arguments, a pattern that changes exactly one code
    value per step can also be created.
    # Notes
    To create a gradation that changes exactly one code value per step, set
    the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth  # named max_val to avoid shadowing the builtin max()
    # Set the gradation direction
    # ---------------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
    if (max_val + 1 != step_num):
        """
        The pattern that does not step one code value at a time.
        Subtract 1 so the last entry does not become 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        The pattern that steps exactly one code value at a time.
        Drop the last entry because it would be 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num arrives with an extra +1, so subtract it
        # Just to be safe, confirm the values change one code value per step
        # ------------------------------------------------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # First build a one-line horizontal gradation
    # -------------------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2D using broadcasting
    # -------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
    # ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
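# Usage sketch (added for illustration): a horizontal 10-bit ramp that steps
# through every code value exactly once. As the docstring above notes,
# step_num must be (2 ** bit_depth) + 1 for the one-code-per-step mode.
def _example_step_gradation():
    return gen_step_gradation(width=1024, height=128, step_num=1025,
                              bit_depth=10, color=(1.0, 1.0, 1.0),
                              direction='h')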
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Composite fg_img onto bg_img using its alpha channel.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
        transfer function name
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
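# Usage sketch (added for illustration): composite a 4-channel (RGBA)
# foreground onto a 3-channel background at position (200, 100). Both images
# hold non-linear code values; merge_with_alpha() linearizes them with the
# given transfer function, blends, then re-encodes the merged area.
def _example_merge_with_alpha():
    bg = np.zeros((1080, 1920, 3))
    fg = np.ones((240, 320, 4)) * 0.5  # flat gray patch with alpha = 0.5
    return merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(200, 100))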
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # Number of pixels horizontally and vertically
    pixel_num = dot_size * 2 * repeat
    # Generate the high/low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Apply color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a dot pattern.
Parameters
----------
kind_num : integer
        The number of dot sizes to create.
        For example, kind_num=3 creates 1-dot, 2-dot, and 4-dot patterns.
    whole_repeat : integer
        The number of sets of the different dot patterns.
        For example, kind_num=3 and whole_repeat=2 create two sets of the
        1-dot, 2-dot, and 4-dot patterns horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        Only for the 3840x2160 HDR test pattern; halves width and height.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Create a CSF-like pattern by overlapping multiple shifted rectangles.
    The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a tiled checkerboard pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for detecting wrong YCbCr coefficients.
    Honestly, it is assembled rather crudely: roughly-made patterns are
    mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is landscape; the following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        It's an optional parameter.
        If you want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
        Each block's size.
        This value is a ratio relative to the canvas height.
    padding : float
        Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
    COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # Derive the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw the 24 patches onto a single image in a 24-iteration loop
    # -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
    Examples
    --------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
    ... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # Convert to log2 space, offsetting so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # Normalize to the [min_exposure, max_exposure] range
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Written with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    For notes on the log2 space, see the description of shaper_func_linear_to_log2().
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
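# Round-trip sketch (added for illustration): the two shaper functions are
# inverses on the in-range portion, so mid_gray maps to 0.5 and back.
def _example_shaper_round_trip():
    x = np.array([0.18])
    y = shaper_func_linear_to_log2(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    x2 = shaper_func_log2_to_linear(
        y, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    return y, x2  # y is approximately [0.5], x2 approximately [0.18]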
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8 bit, so this is a custom implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 toward the lower right.
    Note that the thickness is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to an RGB code value triplet.
    The luminance unit is [cd/m2]. The color is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
    The luminance unit is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Get a list of RGB values for color patches whose center is gray and
    whose surroundings lie on the a*b* plane of CIELAB space, like this:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The generated data is not yet sorted, so it must be rearranged with the
    conversion table returned by calc_rad_patch_idx2().
    This function computes exactly that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
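# Behavior sketch (added for illustration): for outmost_num=9 and
# current_num=3 the ring holds (3 - 1) * 4 = 8 patches, and the returned
# table maps the radial plotting order onto positions in the 9x9 grid.
def _example_rad_patch_idx():
    conv_idx = calc_rad_patch_idx2(outmost_num=9, current_num=3)
    assert len(conv_idx) == (3 - 1) * 4
    return conv_idx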
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get a list of RGB values for color patches whose center is gray and
    whose surroundings lie on the a*b* plane of CIELAB space, like this:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned data is not yet sorted; it must be rearranged with the
    conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get a list of RGB values for color patches whose center is gray and
    whose surroundings lie on the a*b* plane of CIELAB space, like this:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry is
    the green patch at the upper left of the image and the last entry is
    the purple patch at the lower right. To plot the patches, take the data
    from the head of the list and lay it out toward the lower right.
"""
    patch_num = outmost_num ** 2
    rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the acceleration goes 0 -> 1 -> 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the acceleration goes
    0 -> 1 -> 0. The acceleration is twice that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the acceleration goes
    0 -> 1 -> 0. The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values that are not a uniform ramp: the acceleration goes
    0 -> 1 -> 0. The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
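# Generalization sketch (added for illustration, not an original function):
# the 1x/2x/4x/8x variants above differ only in how many times the sine warp
# is applied, so they can be expressed with a single warp count parameter.
def _example_accelerated_x_nx(sample_num=64, warp_num=0):
    # warp_num=0 reproduces the 1x variant, warp_num=1 the 2x variant,
    # warp_num=2 the 4x variant, and warp_num=3 the 8x variant.
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(warp_num):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2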
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Some of the data is not needed here, so extract only xyY and whitepoint
    # -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
    rgb_white_point = target_white
    illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ to RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    Height is derived automatically from Width; padding_rate shifts it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
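# Pipeline sketch (added for illustration): generate linear ColorChecker RGB
# values, encode them with an OETF, and lay them out as a 6x4 patch image.
def _example_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb_code = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb_code, width=1920, padding_rate=0.01)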
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
Parameters
----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
Returns
-------
    tuple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
`calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
generate_color_checker_rgb_value | Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Divide `length` into `div_num` pieces.
    Fractional remainders are spread out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the totals using error diffusion
    # -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle rounding error that can keep the last element from getting the +1
    # -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final check
    # -------------------------------------------
    if length != sum(ret_array):
        raise ValueError("the output of equal_devision() is abnormal.")
return ret_array
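# Behavior sketch (added for illustration): the chunks always sum back to
# `length`; the remainder is spread by error diffusion rather than being
# dumped onto a single chunk.
def _example_equal_devision():
    chunks = equal_devision(10, 3)
    assert sum(chunks) == 10  # e.g. [3, 3, 4]
    return chunks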
def do_matrix(img, mtx):
"""
    Apply the matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
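# Sanity-check sketch (added for illustration): applying the identity matrix
# with do_matrix() must leave the image unchanged.
def _example_do_matrix_identity():
    img = np.random.rand(4, 4, 3)
    identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    assert np.allclose(do_matrix(img, identity), img)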
def _get_cmfs_xy():
"""
    Get the xy values of the horseshoe outline for plotting the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
    # ---------------------
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    # Compute the xy values of the horseshoe (spectral locus)
    # -------------------------------------------------------
    cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Compute RGB values from xy values.
    The result is also normalized in a sensible way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
#     # Set default values for the keyword arguments
#     # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
#     # Prepare the data for plotting
#     # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the horseshoe
    # --------------------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Antialias to smooth the alpha channel
    # -------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative to positive)
    # -----------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Restore colors from the xy mesh
    # -------------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Change the background color to gray
    # -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlapping multiple shifted rectangles.
    The input signal level is limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
    lv1 : numeric
        video level 1. this value must be 16 bit.
    lv2 : numeric
        video level 2. this value must be 16 bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the one in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build the data for judging whether points fall inside the gamut
    # ----------------------------------------------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(float)
    # Antialias to smooth the alpha channel
    # -------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative to positive)
    # -----------------------------
    mask = 1 - mask[:, :, np.newaxis]
    # Restore colors from the xy mesh
    # -------------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
    # -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # From here, convert back to XYZ in order to obtain Y.
    # -----------------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for conversion to a log scale
    # --------------------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at the z=0 plane
    # ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    Handy when creating a 3DLUT in the CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a gradation pattern that changes in steps.
    By adjusting the arguments, a pattern that changes exactly one code
    value per step can also be created.
    # Notes
    To create a gradation that changes exactly one code value per step, set
    the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
    max_val = 2 ** bit_depth  # named max_val to avoid shadowing the builtin max()
    # Set the gradation direction
    # ---------------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
    if (max_val + 1 != step_num):
        """
        The pattern that does not step one code value at a time.
        Subtract 1 so the last entry does not become 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        """
        The pattern that steps exactly one code value at a time.
        Drop the last entry because it would be 256 or 1024.
        """
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # step_num arrives with an extra +1, so subtract it
        # Just to be safe, confirm the values change one code value per step
        # ------------------------------------------------------------------
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")
    # First build a one-line horizontal gradation
    # -------------------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to 2D using broadcasting
    # -------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
    # ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Composite fg_img onto bg_img using its alpha channel.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
    tf_str : strings
        transfer function name
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # Number of pixels horizontally and vertically
    pixel_num = dot_size * 2 * repeat
    # Generate the high/low logical arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Apply color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
    Create a dot pattern.
Parameters
----------
kind_num : integer
        The number of dot sizes to create.
        For example, kind_num=3 creates 1-dot, 2-dot, and 4-dot patterns.
    whole_repeat : integer
        The number of sets of the different dot patterns.
        For example, kind_num=3 and whole_repeat=2 create two sets of the
        1-dot, 2-dot, and 4-dot patterns horizontally and vertically.
    fg_color : array_like
        foreground color value.
    bg_color : array_like
        background color value.
    reduce : bool
        Only for the 3840x2160 HDR test pattern; halves width and height.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
    Make a CSF-like pattern by overlapping several shifted rectangles.
    The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
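# Usage sketch for make_csf_color_image() (illustration only). The output is
# 10-bit data stored as uint16, so normalize by 1023 before previewing as float.
def _demo_make_csf_color_image():
    img = make_csf_color_image(width=640, height=640, stripe_num=6)
    preview_image(img / 1023, 'rgb')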
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Make a checkerboard-style tile pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
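# Usage sketch for make_tile_pattern() (illustration only). The default
# levels are 10-bit code values stored as uint16.
def _demo_make_tile_pattern():
    img = make_tile_pattern(width=480, height=960, h_tile_num=4, v_tile_num=4)
    preview_image(img / 1023, 'rgb')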
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Make a test pattern for checking YCbCr coefficient errors.
    Honestly, this is put together in a fairly messy way: crudely made
    patterns are fused together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is wider than it is tall. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
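# Usage sketch for make_ycbcr_checker() (illustration only). As stated in
# the Note above, the output width is height * 2.
def _demo_make_ycbcr_checker():
    img = make_ycbcr_checker(height=480, v_tile_num=4)
    assert img.shape == (480, 960, 3)
    preview_image(img / 1023, 'rgb')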
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
    rgb2 : array_like
        This is an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
    size : tuple
        canvas size.
    block_size : float
        each block's size.
        This value is a ratio relative to the height of the canvas.
    padding : float
        padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
    # Compute the basic parameters
    # --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # Draw all 24 patches onto one image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Make x-axis data on a log10 scale.
    Examples
    --------
    >>> get_log10_x_scale(
    ...     sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Make x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Created with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
    # Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Created with reference to ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See the description of shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
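# Round-trip sketch for the two shaper functions above (illustration only):
# shaper_func_log2_to_linear(shaper_func_linear_to_log2(x)) reproduces x for
# inputs that fall inside the [min_exposure, max_exposure] range.
def _demo_shaper_round_trip():
    x = np.array([0.01, 0.18, 1.0, 10.0])
    y = shaper_func_linear_to_log2(x, mid_gray=0.18,
                                   min_exposure=-6.5, max_exposure=6.5)
    x2 = shaper_func_log2_to_linear(y, mid_gray=0.18,
                                    min_exposure=-6.5, max_exposure=6.5)
    assert np.allclose(x, x2)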
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8-bit data, so this
    is a hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 toward the lower right.
    Note that the line is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline (border box) around img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to RGB code values.
    The unit of luminance is [cd/m2]. The result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert luminance [cd/m2] to a code value.
    The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
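# Usage sketch (illustration only), mirroring the docstring examples above:
# 100 cd/m2 maps to full code value under GAMMA2.4 (100 nits peak) and to
# roughly 0.508 under ST 2084 (PQ).
def _demo_convert_luminance():
    white_gamma24 = convert_luminance_to_color_value(100, tf.GAMMA24)
    white_st2084 = convert_luminance_to_color_value(100, tf.ST2084)
    return white_gamma24, white_st2084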
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Obtain the index conversion table for the color patch layout whose
    center is gray and whose surrounding patches lie on the a*b* plane of
    the CIELAB space, as shown here:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The generated patch data is not yet sorted and must be reordered with
    the conversion table returned by calc_rad_patch_idx2().
    This function computes exactly that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
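# Usage sketch for calc_rad_patch_idx2() (illustration only). The returned
# array maps the ring of patches generated for `current_num` onto flat
# indices of the outmost_num x outmost_num patch grid. Tracing the code by
# hand for this parameter set gives [13 8 7 6 11 16 17 18].
def _demo_calc_rad_patch_idx2():
    conv_idx = calc_rad_patch_idx2(outmost_num=5, current_num=3)
    print(conv_idx)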
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Obtain the list of RGB values for the color patch layout whose center
    is gray and whose surrounding patches lie on the a*b* plane of the
    CIELAB space, as shown here:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The obtained data is not yet sorted; reorder it with the conversion
    table returned by calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Obtain the list of RGB values for the color patch layout whose center
    is gray and whose surrounding patches lie on the a*b* plane of the
    CIELAB space, as shown here:
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned RGB list is already **sorted** so that the first entry is
    the green patch at the upper left of the image and the last entry is
    the purple patch at the lower right. To plot the patches, take the RGB
    values from the head of the list and lay them out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Make an x that is not uniformly increasing: its acceleration goes 0→1→0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Make an x that is not uniformly increasing: its acceleration goes 0→1→0.
    The acceleration is double that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Make an x that is not uniformly increasing: its acceleration goes 0→1→0.
    The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Make an x that is not uniformly increasing: its acceleration goes 0→1→0.
    The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
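# The four get_accelerated_x_*x() functions above apply the same sine warp
# zero to three extra times; each pass sharpens the ease-in/ease-out. A
# generalized form could look like this sketch (`get_accelerated_x_nx` is a
# hypothetical name, not part of the original API; n=1..4 reproduces the
# 1x, 2x, 4x and 8x variants).
def get_accelerated_x_nx(sample_num=64, n=1):
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(n - 1):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2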
# MASKED: generate_color_checker_rgb_value function (lines 1523-1590)
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Make a 6x4 ColorChecker image.
    The height is computed automatically from the width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # Compute the layout parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
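# Usage sketch for make_color_checker_image() (illustration only). rgb is
# expected to hold 24 display-referred values in row-major (6x4) order; it
# is assumed here (not guaranteed by the source) that applying an OETF to
# the linear output of generate_color_checker_rgb_value() is appropriate.
def _demo_make_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value()
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    img = make_color_checker_image(rgb, width=1920, padding_rate=0.01)
    preview_image(img, 'rgb')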
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
        (st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Wrapped as a function because computing the arguments of
    `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
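# Combined usage sketch (illustration only): center a foreground image on a
# background using get_size_from_image(), calc_st_pos_for_centering() and
# merge() together.
def _demo_centering():
    bg = np.zeros((1080, 1920, 3))
    fg = np.ones((480, 640, 3)) * 0.5
    st_pos = calc_st_pos_for_centering(get_size_from_image(bg),
                                       get_size_from_image(fg))
    merge(bg, fg, pos=st_pos)   # st_pos == (640, 300)
    return bg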
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# 今回の処理では必要ないデータもあるので xyY と whitepoint だけ抽出
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint # ColorCheckerのオリジナルデータの白色点
illuminant_RGB = rgb_white_point # XYZ to RGB 変換後の白色点を設定
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb | 1,523 | 1,590 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# 概要
length を div_num で分割する。
端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# 誤差拡散法を使った辻褄合わせを適用
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# 計算誤差により最終点が +1 されない場合への対処
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# 最終確認
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
img に対して mtx を適用する。
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# 基本パラメータ設定
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
prmary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
xy値からRGB値を算出する。
いい感じに正規化もしておく。
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。必要であれば。
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
white point を求める。CIE1931ベース。
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
secondary color の座標を求める
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
xy色度図の馬蹄形の画像を生成する
Returns
-------
ndarray
rgb image.
"""
"""
色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。
若干色が薄くなるのが難点。暇があれば改良したい。
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# 馬蹄形のxy値を算出
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。
ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。
なお、作成したドロネー図は以下のコードでプロット可能。
1点補足しておくと、```plt.triplot``` の第三引数は、
第一、第二引数から三角形を作成するための **インデックス** のリスト
になっている。[[0, 1, 2], [2, 4, 3], ...]的な。
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` で xy がどのインデックスの領域か
調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、
0以下のリストで領域判定の mask を作ることができる。
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # ゼロ割対策
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb[rgb == 0] = 1.0 # ゼロ割対策
rgb = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# 背景色をグレーに変更
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
SONY の HDR説明資料にあるような xyY の図を作る。
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# 馬蹄の領域判別用データ作成
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# アンチエイリアシングしてアルファチャンネルを滑らかに
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# ネガポジ反転
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# xy のメッシュから色を復元
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# mask 適用
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# こっからもういちど XYZ に変換。Yを求めるために。
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# ログスケールに変換する準備
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# chromatcity_image の取得。z=0 の位置に貼り付ける
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# 概要
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
みたいな配列を返す。
CUBE形式の3DLUTを作成する時に便利。
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# 概要
階段状に変化するグラデーションパターンを作る。
なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。
# 注意事項
正確に1階調ずつ変化するグラデーションを作る場合は
```step_num = (2 ** bit_depth) + 1```
となるようにパラメータを指定すること。具体例は以下のExample参照。
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# グラデーション方向設定
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
1階調ずつの増加では無いパターン。
末尾のデータが 256 や 1024 になるため -1 する。
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num は 引数で余計に +1 されてるので引く
# 念のため1階調ずつの変化か確認
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# まずは水平1LINEのグラデーションを作る
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# ブロードキャストを利用して2次元に拡張する
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# np.uint16 にコンバート
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
img_a に img_b をマージする。
img_a にデータを上書きする。
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
合成する。
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
dot pattern 作る。
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# 水平・垂直のピクセル数
pixel_num = dot_size * 2 * repeat
# High-Log の 論理配列を生成
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# 着色
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# V方向にコピー&Even-Oddの結合
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
dot pattern 作る。
Parameters
----------
kind_num : integer
作成するドットサイズの種類。
例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。
whole_repeat : integer
異なる複数種類のドットパターンの組数。
例えば、kind_num=3, whole_repeat=2 ならば、
1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
HDRテストパターンの3840x2160専用。縦横を半分にする。
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは10bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
タイル状の縞々パターンを作る
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
YCbCr係数誤りを確認するテストパターンを作る。
正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
横長のパターンになる。以下の式が成立する。
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
ColorCheckerをプロットする
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's a optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
A each block's size.
This value is ratio to height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# 基本パラメータ算出
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# 24ループで1枚の画像に24パッチを描画
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Log10スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Log2スケールのx軸データを作る。
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Lin_to_Log2_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385])
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# log2空間への変換。mid_gray が 0.0 となるように補正
y = np.log2(x / mid_gray)
# min, max の範囲で正規化。
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
ACESutil.Log2_to_Lin_param.ctl を参考に作成。
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
Log2空間の補足は shaper_func_linear_to_log2() の説明を参照
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
thickness のパラメータは pt1 の点から右下方向に効きます。
pt1 を中心として太さではない事に注意。
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
img に対して外枠線を引く
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value の RGB値に変換する。
luminance の単位は [cd/m2]。無彩色である。
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
輝度[cd/m2] から code value に変換する。
luminance の単位は [cd/m2]
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
本関数はまさにその変換を行う。
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で
得られる変換テーブルを使った変換が必要。
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの
RGB値のリストを得る。
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
得られた RGB値のリストは最初のデータが画像左上の緑データ、
最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。
よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、
右下に向かって並べていけば良い。
"""
patch_num = outmost_num ** 2
transfer_function = tf.GAMMA24
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の2倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
単調増加ではなく、加速度が 0→1→0 となるような x を作る。
加速度が `get_accelerated_x_1x` の4倍!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point # white point after the XYZ to RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 color checker image.
Height is derived automatically from Width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
# Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
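# Illustrative sketch (my addition, assuming the behaviour documented above):
# centering a 640x480 foreground on a 1920x1080 background should give
# st_pos_h = 1920 // 2 - 640 // 2 = 640 and st_pos_v = 1080 // 2 - 480 // 2 = 300.
def _demo_calc_st_pos_for_centering():
    st_pos = calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    assert st_pos == (640, 300)
    return st_pos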
def get_size_from_image(img):
"""
Turned into a function because computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
calc_st_pos_for_centering | Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of test pattern generation tools for evaluation
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num pieces.
Fractional remainders are spread out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply the error-diffusion-based adjustment
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where the last element misses the +1 due to rounding error
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
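# Illustrative sketch of the error-diffusion division described above (my
# addition): 10 cannot be split evenly into 3 parts, so the remainder is
# spread across the elements while the total is preserved exactly.
def _demo_equal_devision():
    parts = equal_devision(10, 3)  # -> [3, 3, 4]
    assert sum(parts) == 10 and len(parts) == 3
    return parts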
def do_matrix(img, mtx):
"""
Apply the matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe outline used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the horseshoe
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values,
normalizing them nicely as well.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The video level can be low as-is, so normalize & maximize the RGB values
per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: overflow has occurred at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB was too narrow and looked a bit odd, so a wider gamut
is used (currently ACES CG; BT.2020 and S-Gamut3 are kept commented out
below as alternatives). The slightly pale colors are a drawback; to be
improved when there is time.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the horseshoe
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
To decide whether a point lies inside or outside the horseshoe, the region
is split into triangles (a Delaunay triangulation). Once the triangulation
exists, inside/outside can be judged with cross products (probably).
The triangulation can be plotted with the code below. One note: the third
argument of ```plt.triplot``` is a list of **indices** that form triangles
from the first and second arguments, like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` tells which simplex index the xy point
falls into. A return value of ```-1``` means the point is outside, so a
mask for the inside/outside test can be built from the negative values.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert negative/positive
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The video level can be low as-is, so normalize & maximize the RGB values
per pixel.
"""
rgb[rgb == 0] = 1.0 # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlapping several shifted rectangles.
Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 16bit.
lv2 : numeric
video level 2. this value must be 16bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY figure like the ones in SONY's HDR explanation materials.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Create the data used to judge the inside/outside of the region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert negative/positive
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The video level can be low as-is, so normalize & maximize the RGB values
per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# Convert back to XYZ from here, in order to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare to convert to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity_image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Returns an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
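# Illustrative sketch (my addition): a quick shape/ordering check for the
# CUBE-style grid. R varies fastest, then G, then B, matching the
# docstring above.
def _demo_get_3d_grid_cube_format():
    grid = get_3d_grid_cube_format(grid_num=2)
    assert grid.shape == (1, 8, 3)
    assert np.array_equal(grid[0, 0], np.array([0.0, 0.0, 0.0]))
    assert np.array_equal(grid[0, 1], np.array([1.0, 0.0, 0.0]))
    return grid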
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a gradation pattern that changes in steps.
With the right arguments it can also change by exactly one code value per step.
# Note
To build a gradation that changes by exactly one code value per step,
specify the parameters so that ```step_num = (2 ** bit_depth) + 1``` holds.
See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
Pattern that does not increase by one code value per step.
Subtract 1 so that the last entry does not become 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
Pattern that changes by exactly one code value per step.
Drop the last entry because it would be 256 or 1024.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num comes in one larger from the argument, so subtract it back
# Double-check that each step changes by exactly one code value
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First, build a one-line horizontal gradation
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to two dimensions using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
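# Illustrative sketch (my addition) of the "exactly one code value per step"
# case described in the docstring above: with step_num = (2 ** bit_depth) + 1
# every horizontal step increases by exactly one code value.
def _demo_gen_step_gradation():
    img = gen_step_gradation(width=1024, height=8, step_num=257, bit_depth=8,
                             color=(1.0, 1.0, 1.0), direction='h')
    line = img[0, :, 0]
    assert line.min() == 0 and line.max() == 255
    return img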
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a, overwriting the data in img_a in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Alpha-composite fg_img onto bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf_str : str
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# number of pixels horizontally / vertically
pixel_num = dot_size * 2 * repeat
# generate the High-Low logic arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Create a composite dot pattern.
Parameters
----------
kind_num : integer
The number of dot sizes to create.
For example, kind_num=3 creates the 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
The number of sets of the different dot patterns.
For example, kind_num=3 and whole_repeat=2 lay out two sets of the
1-dot, 2-dot and 4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlapping several shifted rectangles.
Input signal levels are limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Create a tiled checker pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Create a test pattern for checking YCbCr coefficient errors.
Honestly, it is assembled quite crudely from roughly-made sub-patterns.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern is landscape; the following relations hold:
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker image.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
An optional parameter.
If You want to draw two different ColorCheckers,
set the RGB value to this variable.
size : tuple
canvas size.
block_size : float
Each block's size,
as a ratio to the height of the canvas.
padding : float
Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# Compute the basic parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
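# Illustrative sketch (my addition): a minimal check of the log2 scale.
# The endpoints land exactly on ref_val * 2**min_exposure and
# ref_val * 2**max_exposure.
def _demo_get_log2_x_scale():
    x = get_log2_x_scale(sample_num=9, ref_val=1.0,
                         min_exposure=-4.0, max_exposure=4.0)
    assert np.isclose(x[0], 2.0 ** -4) and np.isclose(x[-1], 2.0 ** 4)
    return x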
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Written with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offset so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Written with reference to ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
See the description of shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
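# Illustrative round-trip sketch (my addition): the two shaper functions
# above are inverses of each other inside the [min_exposure, max_exposure]
# range, and mid_gray maps to the centre of the [0, 1] shaper domain.
def _demo_shaper_func_round_trip():
    x = np.array([0.25, 0.5, 0.75])
    linear = shaper_func_log2_to_linear(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    x2 = shaper_func_linear_to_log2(
        linear, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    assert np.allclose(x, x2)
    assert np.isclose(shaper_func_log2_to_linear(np.array([0.5]))[0], 0.18)
    return linear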
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV only seems to support 8 bit, hence this hand-rolled version.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter takes effect towards the lower right of pt1.
Note that the line is not centred on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline around the edges of img.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to an RGB triplet of code values.
The luminance unit is [cd/m2]. The result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] to a code value.
The luminance unit is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
The RGB patch data for the layout below (gray centre, surrounding patches
on the a*b* plane of CIELAB space)
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
comes out unsorted and has to be rearranged with the conversion table
returned by calc_rad_patch_idx2().
This function computes exactly that conversion.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
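# Illustrative sketch of the index conversion (my addition): for a 3x3
# layout the 8 ring patches map onto these raster positions; the centre
# patch (index 4) is not part of the ring.
def _demo_calc_rad_patch_idx2():
    conv_idx = calc_rad_patch_idx2(outmost_num=3, current_num=3)
    assert len(conv_idx) == 8  # (3 - 1) * 4 ring patches
    assert 4 not in conv_idx   # the centre stays untouched
    return conv_idx            # -> [5, 2, 1, 0, 3, 6, 7, 8]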
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Get the list of RGB values for the layout below: gray centre, surrounding
patches on the a*b* plane of CIELAB space.
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted and has to be rearranged with the
conversion table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get the list of RGB values for the layout below: gray centre, surrounding
patches on the a*b* plane of CIELAB space.
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted** so that the first entry is the
green patch at the top-left of the image and the last one is the purple
patch at the bottom-right. To plot the patches, take the data from the head
of the list in order and lay it out towards the bottom-right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create an x whose acceleration changes 0 -> 1 -> 0 instead of increasing at a constant rate.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create an x whose acceleration changes 0 -> 1 -> 0 instead of increasing at a constant rate.
The acceleration is double that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create an x whose acceleration changes 0 -> 1 -> 0 instead of increasing at a constant rate.
The acceleration is four times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create an x whose acceleration changes 0 -> 1 -> 0 instead of increasing at a constant rate.
The acceleration is eight times that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
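# Illustrative comparison of the easing curves above (my addition): every
# curve runs monotonically from 0 to 1; the higher the multiplier, the
# flatter the curve near the ends and the steeper it is around the middle.
def _demo_accelerated_x():
    x1 = get_accelerated_x_1x(8)
    x8 = get_accelerated_x_8x(8)
    assert np.isclose(x1[0], 0.0) and np.isclose(x1[-1], 1.0)
    assert np.isclose(x8[0], 0.0) and np.isclose(x8[-1], 1.0)
    assert x8[1] < x1[1]  # the 8x curve leaves zero more slowly
    return x1, x8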
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point # white point after the XYZ to RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 color checker image.
Height is derived automatically from Width; padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
# Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
# MASKED: calc_st_pos_for_centering function (lines 1625-1656)
def get_size_from_image(img):
"""
Turned into a function because computing the arguments of `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))) | def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v) | 1,625 | 1,656 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of test pattern generation tools for evaluation
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num pieces.
Fractional remainders are spread out nicely using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply the error-diffusion-based adjustment
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where the last element misses the +1 due to rounding error
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
"""
Apply the matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe outline used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the horseshoe
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Compute RGB values from xy values,
normalizing them nicely as well.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The video level can be low as-is, so normalize & maximize the RGB values
per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: overflow has occurred at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: underflow has occurred at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Get the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare the data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Gamut setting. sRGB was too narrow and looked a bit odd, so a wider gamut
is used (currently ACES CG; BT.2020 and S-Gamut3 are kept commented out
below as alternatives). The slightly pale colors are a drawback; to be
improved when there is time.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the horseshoe
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
To decide whether a point lies inside or outside the horseshoe, the region
is split into triangles (a Delaunay triangulation). Once the triangulation
exists, inside/outside can be judged with cross products (probably).
The triangulation can be plotted with the code below. One note: the third
argument of ```plt.triplot``` is a list of **indices** that form triangles
from the first and second arguments, like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` tells which simplex index the xy point
falls into. A return value of ```-1``` means the point is outside, so a
mask for the inside/outside test can be built from the negative values.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)
# Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert negative/positive
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The video level can be low as-is, so normalize & maximize the RGB values
per pixel.
"""
rgb[rgb == 0] = 1.0 # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlapping several shifted rectangles.
Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 16bit.
lv2 : numeric
video level 2. this value must be 16bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
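# Hypothetical usage sketch: write the 16-bit CSF-like pattern to disk.
# The RGB -> BGR swap follows the cv2.imwrite convention used elsewhere
# in this file.
def _demo_csf_color_image():
    img = get_csf_color_image(width=640, height=480)
    cv2.imwrite("csf_demo.tiff", img[..., ::-1])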
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY plot like the ones in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Build data for inside/outside tests of the horseshoe region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(float)  # np.float was removed in NumPy >= 1.24
# Anti-alias so the alpha channel becomes smooth
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Reconstruct colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
そのままだとビデオレベルが低かったりするので、
各ドット毎にRGB値を正規化&最大化する。
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# From here, convert back to XYZ once more, in order to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare for conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity image and paste it at the z=0 position
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Returns an array like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Convenient when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
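# Hypothetical sketch of how the grid maps to a CUBE-format 3DLUT: apply
# any RGB->RGB transform to the grid and write one "R G B" line per entry.
# The identity transform below is only a placeholder.
def _demo_write_identity_cube(fname="identity.cube", grid_num=4):
    grid = get_3d_grid_cube_format(grid_num=grid_num).reshape((-1, 3))
    with open(fname, "w") as f:
        f.write("LUT_3D_SIZE {}\n".format(grid_num))
        for r, g, b in grid:
            f.write("{:.6f} {:.6f} {:.6f}\n".format(r, g, b))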
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
"""
Evaluate a quadratic Bezier curve defined by control points p0, p1, p2
at parameter t (``samples`` is currently unused and kept for
compatibility). The original computed x and y but returned nothing;
a return statement is added so the result is usable.
"""
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a gradation pattern that changes in steps.
By adjusting the arguments, a pattern that changes by exactly one code
value per step can also be created.
# Notes
To create a gradation that changes by exactly one code value per step,
specify the parameters so that
```step_num = (2 ** bit_depth) + 1```
holds. See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max_val = 2 ** bit_depth  # renamed from ``max`` to avoid shadowing the builtin
# Set the gradient direction
# ----------------------
if direction == 'h':
pass
else:
width, height = height, width
if (max_val + 1 != step_num):
"""
Pattern that does not step by exactly one code value.
The last entry would become 256 or 1024, so subtract 1.
"""
val_list = np.linspace(0, max_val, step_num)
val_list[-1] -= 1
else:
"""
Pattern that changes by exactly one code value per step.
The last entry would become 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max_val, step_num)[0:-1]
step_num -= 1  # step_num is passed with an extra +1, so subtract it
# Just in case, verify the change is exactly one code value per step
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if not (diff == 1).all():
raise ValueError("calculated value is invalid.")
# First, build a one-line horizontal gradient
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a.
The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Composite fg_img over bg_img using the alpha channel.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf_str : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
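# Hypothetical usage sketch: composite a half-transparent green square onto
# a gray background at pos=(200, 100). Both images hold float code values
# in the same transfer function (sRGB here).
def _demo_merge_with_alpha():
    bg = np.ones((1080, 1920, 3)) * 0.5
    fg = np.zeros((256, 256, 4))
    fg[..., 1] = 1.0  # green
    fg[..., 3] = 0.5  # 50% alpha
    return merge_with_alpha(bg, fg, tf_str=tf.SRGB, pos=(200, 100))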
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# Number of pixels in the horizontal / vertical direction
pixel_num = dot_size * 2 * repeat
# Generate boolean arrays for the High-Low pattern
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# Apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# Repeat vertically and combine the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
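# Hypothetical sanity check: with dot_size=4 and repeat=2 each side is
# dot_size * 2 * repeat = 16 pixels.
def _demo_dot_pattern():
    img = dot_pattern(dot_size=4, repeat=2, color=np.array([1.0, 1.0, 0.0]))
    assert img.shape == (16, 16, 3)
    return img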
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Create a dot pattern.
Parameters
----------
kind_num : integer
number of dot sizes to create.
For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
number of sets of the different dot patterns.
For example, kind_num=3 and whole_repeat=2 lay out two sets of the
1-dot, 2-dot and 4-dot patterns horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlaying several offset rectangles.
The input signal level is restricted to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
Create a tiled checkered pattern
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
Create a test pattern for checking YCbCr coefficient errors.
Honestly, this is assembled quite crudely: roughly-made patterns are
mashed together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
The pattern is wider than tall. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
It's an optional parameter.
If you want to draw two different ColorCheckers,
set the RGB values of the second one to this variable.
size : tuple
canvas size.
block_size : float
Each block's size,
given as a ratio to the height of the canvas.
padding : float
Padding around each block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
COLOR_CHECKER_PADDING = 0.01
# Compute the basic parameters
# --------------------------------------
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
# Draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
Create x-axis data on a log10 scale.
Examples
--------
>>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
... x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
# Convert to log2 space, offsetting so that mid_gray maps to 0.0
y = np.log2(x / mid_gray)
# Normalize to the [min_exposure, max_exposure] range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
Created with reference to ACESutil.Log2_to_Lin_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
For notes on the log2 space, see the description of shaper_func_linear_to_log2().
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
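# Hypothetical round-trip check: applying the log2 shaper forward and then
# inverse should reproduce the input for values above the clipped region
# (i.e. above mid_gray * 2 ** min_exposure).
def _demo_shaper_round_trip():
    x = np.array([0.01, 0.18, 1.0, 10.0])
    y = shaper_func_linear_to_log2(x, 0.18, -6.5, 6.5)
    x2 = shaper_func_log2_to_linear(y, 0.18, -6.5, 6.5)
    return np.allclose(x, x2)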
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
Draw a straight line. OpenCV appears to support only 8 bit, so this is implemented by hand.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
The thickness parameter takes effect toward the lower right of pt1.
Note that it is not a thickness centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
Draw an outline (border) around img
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into achromatic RGB code values.
The unit of luminance is [cd/m2].
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
Convert luminance [cd/m2] into a code value.
The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
Compute the reordering table for a set of color patches whose center is
gray and whose surroundings lie on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The generated patch data is not yet sorted, so it must be rearranged with
the conversion table returned by calc_rad_patch_idx2(). This function
computes exactly that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
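# Hypothetical sketch: inspect the reordering table for the 3x3 ring inside
# a 5x5 layout. The returned values are indices into the flattened 5x5 grid.
def _demo_rad_patch_idx():
    print(calc_rad_patch_idx2(outmost_num=5, current_num=3))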
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
Compute the RGB values of color patches whose center is gray and whose
surroundings lie on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned data is not yet sorted; rearrange it with the conversion
table obtained from calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
Get the list of RGB values of color patches whose center is gray and
whose surroundings lie on the a*b* plane of CIELAB space:
https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
The returned RGB list is already **sorted** so that the first entry is
the green patch at the upper left of the image and the last entry is the
purple patch at the lower right. To plot the patches, simply take the
data from the head of the list and lay it out toward the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
Create x values that are not uniformly spaced: the acceleration goes 0 → 1 → 0 instead.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
Create x values that are not uniformly spaced: the acceleration goes
0 → 1 → 0, twice as strong as `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
Create x values that are not uniformly spaced: the acceleration goes
0 → 1 → 0, four times as strong as `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
Create x values that are not uniformly spaced: the acceleration goes
0 → 1 → 0, eight times as strong as `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
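# Hypothetical sketch comparing the four easing strengths: every variant
# keeps the endpoints 0 and 1 but concentrates samples near them more and
# more strongly.
def _demo_accelerated_x():
    for f in (get_accelerated_x_1x, get_accelerated_x_2x,
              get_accelerated_x_4x, get_accelerated_x_8x):
        print(f.__name__, np.round(f(8), 3))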
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
# Some of the data is not needed here, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
illuminant_XYZ = whitepoint  # white point of the original ColorChecker data
illuminant_RGB = rgb_white_point  # white point after the XYZ-to-RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
Create a 6x4 ColorChecker image.
Height is computed automatically from width; it varies slightly with padding_rate.
"""
h_patch_num = 6
v_patch_num = 4
# Compute the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
# Lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
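# Hypothetical sketch chaining the helpers above: generate linear
# ColorChecker RGB values, apply an OETF, and lay them out as a 6x4 chart.
def _demo_color_checker_image():
    rgb_linear = generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE)
    rgb = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
    return make_color_checker_image(rgb, width=1920, padding_rate=0.01)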
def calc_st_pos_for_centering(bg_size, fg_size):
"""
Calculate the start position for centering.
Parameters
----------
bg_size : tuple(int)
(width, height) of the background image.
fg_size : tuple(int)
(width, height) of the foreground image.
Returns
-------
tuple (int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
Convenience wrapper: computing the arguments for `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
|
moving_average | Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
Number of entries to average over. n=2 means averaging over the current
and the previous entry.
Returns
-------
An array view storing the moving average. | """Utility functions and classes
"""
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
# e.g. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
AnyRandom = Union[None, int, random.RandomState] # maybe in the future random.Generator
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
# make this a warning, not an error
# it might be useful for people to still be able to run it
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
"""
Decorator which marks a function's keyword arguments as deprecated. It
will result in a warning being emitted when a deprecated keyword argument
is used, and in the function being called with the new argument name.
Parameters
----------
arg_mapping
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
# reset filter
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
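def _demo_deprecated_arg_names():
    """Hypothetical usage sketch, not part of the public API: calling with
    the old keyword emits a DeprecationWarning and forwards the value to
    the new keyword."""
    @deprecated_arg_names({"n_comps": "n_components"})
    def reduce_dims(data, n_components=2):
        return data[:, :n_components]
    data = np.zeros((5, 4))
    return reduce_dims(data, n_comps=3)  # warns, then uses n_components=3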
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
# Python’s import mechanism seems to add this to `scanpy`’s attributes
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
def _check_array_function_arguments(**kwargs):
"""Checks for invalid arguments when an array is passed.
Helper for functions that work on either AnnData objects or array-likes.
"""
# TODO: Figure out a better solution for documenting dispatched functions
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
"""
Normalize checking `use_raw`.
The intention here is also to provide a single place from which to throw a deprecation warning in the future.
"""
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
# --------------------------------------------------------------------------------
# Graph stuff
# --------------------------------------------------------------------------------
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except Exception:
pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
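def _demo_get_igraph_from_adjacency():
    """Hypothetical sketch (assumes python-igraph is installed): build a
    weighted directed graph from a small sparse adjacency matrix; one edge
    is created per nonzero entry."""
    adj = sparse.csr_matrix(np.array([[0.0, 2.0], [2.0, 0.0]]))
    g = get_igraph_from_adjacency(adj, directed=True)
    return g.vcount(), g.ecount()  # (2, 2)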
# --------------------------------------------------------------------------------
# Group stuff
# --------------------------------------------------------------------------------
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata
prediction
Field name of adata.obs.
reference
Field name of adata.obs.
normalization
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold
Do not consider associations whose overlap is below this fraction.
max_n_names
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
asso_names
List of associated reference names
(`max_n_names` for each predicted name).
asso_matrix
Matrix where rows correspond to the predicted labels and columns to the
reference labels, entries are proportional to degree of association.
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
# --------------------------------------------------------------------------------
# Other stuff
# --------------------------------------------------------------------------------
# backwards compat... remove this in the future
def sanitize_anndata(adata):
"""Transform string annotations to categoricals."""
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
# MASKED: moving_average function (lines 413-430)
# --------------------------------------------------------------------------------
# Deal with tool parameters
# --------------------------------------------------------------------------------
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
"""\
Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params
new_params
check
Returns
-------
updated_params
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
raise ValueError(
'\''
+ key
+ '\' is not a valid parameter key, '
+ 'consider one of \n'
+ str(list(old_params.keys()))
)
if val is not None:
updated_params[key] = val
return updated_params
# --------------------------------------------------------------------------------
# Others
# --------------------------------------------------------------------------------
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
"""Checks values of X to ensure it is count data"""
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
# Check no negatives
if np.signbit(data).any():
return False
# Check all are integers
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key]."""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""\
Subsample a fraction of 1/subsample samples from the rows of X.
Parameters
----------
X
Data array.
subsample
1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
# this sequence is defined simply by skipping rows
# is faster than sampling
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Subsample n samples from rows of array.
Parameters
----------
X
Data array.
n
Sample size.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if n < 0:
raise ValueError('n must be greater than or equal to 0')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
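def _demo_subsample_n():
    """Hypothetical sketch: reproducibly draw 3 of 10 rows (fixed seed)."""
    X = np.arange(20).reshape(10, 2)
    Xsampled, rows = subsample_n(X, n=3, seed=0)
    return Xsampled.shape, rows.shape  # ((3, 2), (3,))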
def check_presence_download(filename: Path, backup_url):
"""Check if file is present otherwise download."""
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
"""Imports a module in a way that it’s only executed on member access"""
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
# Make module with proper locking and get it inserted into sys.modules.
loader.exec_module(module)
return module
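def _demo_lazy_import():
    """Hypothetical sketch: the returned module is loaded lazily; its body
    only executes on first attribute access."""
    mod = lazy_import("json")
    return mod.dumps({"lazy": True})  # attribute access triggers the import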
# --------------------------------------------------------------------------------
# Neighbors
# --------------------------------------------------------------------------------
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
"""Convenience class for accessing neighbors graph representations.
Allows to access neighbors distances, connectivities and settings
dictionary in a uniform manner.
Parameters
----------
adata
AnnData object.
key
This defines where to look for neighbors dictionary,
connectivities, distances.
neigh = NeighborsView(adata, key)
neigh['distances']
neigh['connectivities']
neigh['params']
'connectivities' in neigh
'params' in neigh
is the same as
adata.obsp[adata.uns[key]['distances_key']]
adata.obsp[adata.uns[key]['connectivities_key']]
adata.uns[key]['params']
adata.uns[key]['connectivities_key'] in adata.obsp
'params' in adata.uns[key]
"""
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
# fallback to uns
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
"""Choose connectivities from neighbbors or another obsp column"""
if obsp is not None and neighbors_key is not None:
raise ValueError(
'You can\'t specify both obsp, neighbors_key. ' 'Please select only one.'
)
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities'] | def moving_average(a: np.ndarray, n: int):
"""Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
Number of entries to average over. n=2 means averaging over the current
and the previous entry.
Returns
-------
An array view storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n | 413 | 430 | """Utility functions and classes
"""
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
# e.g. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
AnyRandom = Union[None, int, random.RandomState] # maybe in the future random.Generator
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
# make this a warning, not an error
# it might be useful for people to still be able to run it
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
"""
Decorator which marks a functions keyword arguments as deprecated. It will
result in a warning being emitted when the deprecated keyword argument is
used, and the function being called with the new argument.
Parameters
----------
arg_mapping
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
# reset filter
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
# Python’s import mechanism seems to add this to `scanpy`’s attributes
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
def _check_array_function_arguments(**kwargs):
"""Checks for invalid arguments when an array is passed.
Helper for functions that work on either AnnData objects or array-likes.
"""
# TODO: Figure out a better solution for documenting dispatched functions
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
"""
Normalize checking `use_raw`.
My intentention here is to also provide a single place to throw a deprecation warning from in future.
"""
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
# --------------------------------------------------------------------------------
# Graph stuff
# --------------------------------------------------------------------------------
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except:
pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
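# Hedged example (not part of the original module; requires python-igraph):
# a tiny round trip between the two graph helpers above. The helper name
# `_demo_graph_roundtrip` is hypothetical.
def _demo_graph_roundtrip():
    from scipy.sparse import csr_matrix
    adj = csr_matrix(np.array([[0.0, 1.0], [1.0, 0.0]]))
    g = get_igraph_from_adjacency(adj, directed=False)
    # returns a symmetric CSR built from the edge list (duplicate edges are summed)
    return get_sparse_from_igraph(g, weight_attr='weight')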
# --------------------------------------------------------------------------------
# Group stuff
# --------------------------------------------------------------------------------
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata
prediction
Field name of adata.obs.
reference
Field name of adata.obs.
normalization
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold
Do not consider associations whose overlap is below this fraction.
max_n_names
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
asso_names
List of associated reference names
(`max_n_names` for each predicted name).
asso_matrix
Matrix where rows correspond to the predicted labels and columns to the
reference labels, entries are proportional to degree of association.
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
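# Hedged example (hypothetical helper): `identify_groups` works on plain
# numpy label arrays; no AnnData object is needed.
def _demo_identify_groups():
    ref = np.array(['a', 'a', 'b', 'b'])
    pred = np.array(['x', 'x', 'x', 'y'])
    # 'a' is fully covered by 'x'; for 'b', candidates are ranked by the
    # minimum of the two relative overlaps
    return identify_groups(ref, pred, return_overlaps=True)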
# --------------------------------------------------------------------------------
# Other stuff
# --------------------------------------------------------------------------------
# backwards compat... remove this in the future
def sanitize_anndata(adata):
"""Transform string annotations to categoricals."""
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
def moving_average(a: np.ndarray, n: int):
"""Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
        Number of entries to average over. n=2 means averaging over the current
        and the previous entry.
Returns
-------
    An array of length ``len(a) - n + 1`` storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
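# Hedged example (hypothetical helper): with n=2 each output entry averages
# the current and the previous input entry.
def _demo_moving_average():
    a = np.array([1.0, 2.0, 3.0, 4.0])
    return moving_average(a, n=2)  # array([1.5, 2.5, 3.5])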
# --------------------------------------------------------------------------------
# Deal with tool parameters
# --------------------------------------------------------------------------------
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
"""\
Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params
new_params
check
Returns
-------
updated_params
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
                raise ValueError(
                    f"'{key}' is not a valid parameter key, "
                    f"consider one of\n{list(old_params.keys())}"
                )
if val is not None:
updated_params[key] = val
return updated_params
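# Hedged example (hypothetical helper): with check=True only keys already in
# old_params may be updated; unknown keys raise a ValueError.
def _demo_update_params():
    old = {'n_neighbors': 15, 'metric': 'euclidean'}
    return update_params(old, {'metric': 'cosine'}, check=True)
    # -> {'n_neighbors': 15, 'metric': 'cosine'}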
# --------------------------------------------------------------------------------
# Others
# --------------------------------------------------------------------------------
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
"""Checks values of X to ensure it is count data"""
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
# Check no negatives
if np.signbit(data).any():
return False
# Check all are integers
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
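# Hedged example (hypothetical helper): count data must be non-negative and
# integer-valued, for both dense arrays and sparse matrices.
def _demo_check_counts():
    ok = check_nonnegative_integers(np.array([[0, 1], [2, 3]]))
    bad = check_nonnegative_integers(sparse.csr_matrix([[0.5, -1.0]]))
    return ok, bad  # (True, False)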
def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key]."""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
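# Hedged example (hypothetical helper): selecting one category's boolean mask
# from a categorical obs column of a minimal AnnData.
def _demo_select_groups():
    import pandas as pd
    ad = AnnData(
        np.zeros((4, 1)),
        obs={'groups': pd.Categorical(['a', 'a', 'b', 'b'])},
    )
    names, masks = select_groups(ad, ['b'], key='groups')
    return names, masks  # (array(['b'], ...), one boolean row per selected group)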
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""\
Subsample a fraction of 1/subsample samples from the rows of X.
Parameters
----------
X
Data array.
subsample
1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
        # this sequence is defined simply by skipping rows,
        # which is faster than sampling
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Subsample n samples from rows of array.
Parameters
----------
X
Data array.
n
Sample size.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if n < 0:
        raise ValueError('n must not be negative')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
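# Hedged example (hypothetical helper): deterministic striding with seed=0
# versus reproducible random sampling via subsample_n.
def _demo_subsample():
    X = np.arange(10, dtype=float).reshape(10, 1)
    Xs, rows = subsample(X, subsample=2)      # rows [0, 2, 4, 6, 8]
    Xr, rnd_rows = subsample_n(X, n=3, seed=1)
    return rows, rnd_rows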
def check_presence_download(filename: Path, backup_url):
"""Check if file is present otherwise download."""
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
"""Imports a module in a way that it’s only executed on member access"""
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
# Make module with proper locking and get it inserted into sys.modules.
loader.exec_module(module)
return module
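# Hedged example (hypothetical helper): the lazily imported module body only
# runs on first attribute access; `json` stands in for a heavy dependency.
def _demo_lazy_import():
    mod = lazy_import('json')   # nothing is executed yet (unless already cached)
    return mod.dumps({'a': 1})  # first attribute access triggers the import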
# --------------------------------------------------------------------------------
# Neighbors
# --------------------------------------------------------------------------------
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
"""Convenience class for accessing neighbors graph representations.
    Allows uniform access to the neighbors' distances, connectivities and
    settings dictionary.
Parameters
----------
adata
AnnData object.
key
This defines where to look for neighbors dictionary,
connectivities, distances.
neigh = NeighborsView(adata, key)
neigh['distances']
neigh['connectivities']
neigh['params']
'connectivities' in neigh
'params' in neigh
is the same as
adata.obsp[adata.uns[key]['distances_key']]
adata.obsp[adata.uns[key]['connectivities_key']]
adata.uns[key]['params']
adata.uns[key]['connectivities_key'] in adata.obsp
'params' in adata.uns[key]
"""
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
# fallback to uns
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
"""Choose connectivities from neighbbors or another obsp column"""
if obsp is not None and neighbors_key is not None:
        raise ValueError(
            "You can't specify both `obsp` and `neighbors_key`. "
            "Please select only one."
        )
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities']
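# Hedged usage sketch (assumes a neighbors graph was computed beforehand,
# e.g. with `sc.pp.neighbors(adata)`):
#     conn = _choose_graph(adata, obsp=None, neighbors_key='neighbors')
# is equivalent to
#     conn = NeighborsView(adata, 'neighbors')['connectivities']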
|
warn_with_traceback | Get full tracebacks when a warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings | """Utility functions and classes
"""
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
# e.g. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
AnyRandom = Union[None, int, random.RandomState] # maybe in the future random.Generator
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
# make this a warning, not an error
# it might be useful for people to still be able to run it
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
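# Hedged example (hypothetical function): `getdoc` appends signature-derived
# type information to docstring lines that name a parameter.
def _demo_getdoc():
    def f(x: int = 1):
        """Summary.
        x
        """
        return x
    return getdoc(f)  # the 'x' line becomes 'x : int, optional (default: 1)'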
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
"""
    Decorator which marks a function's keyword arguments as deprecated. It will
    result in a warning being emitted when a deprecated keyword argument is
    used, and in the function being called with the new argument instead.
Parameters
----------
arg_mapping
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
# reset filter
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
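# Hedged example (hypothetical function): calling with the old keyword warns
# and transparently forwards the value to the new keyword.
@deprecated_arg_names({'n_comps': 'n_components'})
def _demo_embed(data, n_components=2):
    return n_components
# _demo_embed(None, n_comps=5) -> emits a DeprecationWarning and returns 5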
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
# Python’s import mechanism seems to add this to `scanpy`’s attributes
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
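# Hedged example (hypothetical function): `_doc_params` fills `{...}`
# placeholders in a docstring from keyword arguments.
@_doc_params(params="x\n    An integer.")
def _demo_documented(x):
    """\
    Demo function.

    Parameters
    ----------
    {params}
    """
    return x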
def _check_array_function_arguments(**kwargs):
"""Checks for invalid arguments when an array is passed.
Helper for functions that work on either AnnData objects or array-likes.
"""
# TODO: Figure out a better solution for documenting dispatched functions
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
"""
Normalize checking `use_raw`.
    The intention here is also to provide a single place from which to raise
    a deprecation warning in the future.
"""
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
# --------------------------------------------------------------------------------
# Graph stuff
# --------------------------------------------------------------------------------
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except KeyError:
        # assigning edge attributes raises KeyError on graphs without edges
        pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
# --------------------------------------------------------------------------------
# Group stuff
# --------------------------------------------------------------------------------
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata
prediction
Field name of adata.obs.
reference
Field name of adata.obs.
normalization
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold
Do not consider associations whose overlap is below this fraction.
max_n_names
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
asso_names
List of associated reference names
(`max_n_names` for each predicted name).
asso_matrix
Matrix where rows correspond to the predicted labels and columns to the
reference labels, entries are proportional to degree of association.
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
# --------------------------------------------------------------------------------
# Other stuff
# --------------------------------------------------------------------------------
# backwards compat... remove this in the future
def sanitize_anndata(adata):
"""Transform string annotations to categoricals."""
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
def moving_average(a: np.ndarray, n: int):
"""Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
        Number of entries to average over. n=2 means averaging over the current
        and the previous entry.
Returns
-------
    An array of length ``len(a) - n + 1`` storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
# --------------------------------------------------------------------------------
# Deal with tool parameters
# --------------------------------------------------------------------------------
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
"""\
Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params
new_params
check
Returns
-------
updated_params
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
                raise ValueError(
                    f"'{key}' is not a valid parameter key, "
                    f"consider one of\n{list(old_params.keys())}"
                )
if val is not None:
updated_params[key] = val
return updated_params
# --------------------------------------------------------------------------------
# Others
# --------------------------------------------------------------------------------
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
"""Checks values of X to ensure it is count data"""
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
# Check no negatives
if np.signbit(data).any():
return False
# Check all are integers
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key]."""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
# MASKED: warn_with_traceback function (lines 545-558)
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""\
Subsample a fraction of 1/subsample samples from the rows of X.
Parameters
----------
X
Data array.
subsample
1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
        # this sequence is defined simply by skipping rows,
        # which is faster than sampling
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Subsample n samples from rows of array.
Parameters
----------
X
Data array.
n
Sample size.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if n < 0:
        raise ValueError('n must not be negative')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
def check_presence_download(filename: Path, backup_url):
"""Check if file is present otherwise download."""
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
"""Imports a module in a way that it’s only executed on member access"""
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
# Make module with proper locking and get it inserted into sys.modules.
loader.exec_module(module)
return module
# --------------------------------------------------------------------------------
# Neighbors
# --------------------------------------------------------------------------------
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
"""Convenience class for accessing neighbors graph representations.
    Allows uniform access to the neighbors' distances, connectivities and
    settings dictionary.
Parameters
----------
adata
AnnData object.
key
This defines where to look for neighbors dictionary,
connectivities, distances.
neigh = NeighborsView(adata, key)
neigh['distances']
neigh['connectivities']
neigh['params']
'connectivities' in neigh
'params' in neigh
is the same as
adata.obsp[adata.uns[key]['distances_key']]
adata.obsp[adata.uns[key]['connectivities_key']]
adata.uns[key]['params']
adata.uns[key]['connectivities_key'] in adata.obsp
'params' in adata.uns[key]
"""
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
# fallback to uns
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
"""Choose connectivities from neighbbors or another obsp column"""
if obsp is not None and neighbors_key is not None:
        raise ValueError(
            "You can't specify both `obsp` and `neighbors_key`. "
            "Please select only one."
        )
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities'] | def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line)) | 545 | 558 | """Utility functions and classes
"""
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
# e.g. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
AnyRandom = Union[None, int, random.RandomState] # maybe in the future random.Generator
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
# make this a warning, not an error
# it might be useful for people to still be able to run it
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
"""
    Decorator which marks a function's keyword arguments as deprecated. It will
    result in a warning being emitted when a deprecated keyword argument is
    used, and in the function being called with the new argument instead.
Parameters
----------
arg_mapping
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
# reset filter
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
# Python’s import mechanism seems to add this to `scanpy`’s attributes
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
def _check_array_function_arguments(**kwargs):
"""Checks for invalid arguments when an array is passed.
Helper for functions that work on either AnnData objects or array-likes.
"""
# TODO: Figure out a better solution for documenting dispatched functions
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
"""
Normalize checking `use_raw`.
    The intention here is also to provide a single place from which to raise
    a deprecation warning in the future.
"""
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
# --------------------------------------------------------------------------------
# Graph stuff
# --------------------------------------------------------------------------------
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except KeyError:
        # assigning edge attributes raises KeyError on graphs without edges
        pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
# --------------------------------------------------------------------------------
# Group stuff
# --------------------------------------------------------------------------------
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata
prediction
Field name of adata.obs.
reference
Field name of adata.obs.
normalization
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold
Do not consider associations whose overlap is below this fraction.
max_n_names
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
asso_names
List of associated reference names
(`max_n_names` for each predicted name).
asso_matrix
Matrix where rows correspond to the predicted labels and columns to the
reference labels, entries are proportional to degree of association.
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
# --------------------------------------------------------------------------------
# Other stuff
# --------------------------------------------------------------------------------
# backwards compat... remove this in the future
def sanitize_anndata(adata):
"""Transform string annotations to categoricals."""
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
def moving_average(a: np.ndarray, n: int):
"""Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
        Number of entries to average over. n=2 means averaging over the current
        and the previous entry.
Returns
-------
    An array of length ``len(a) - n + 1`` storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
# --------------------------------------------------------------------------------
# Deal with tool parameters
# --------------------------------------------------------------------------------
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
"""\
Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params
new_params
check
Returns
-------
updated_params
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
                raise ValueError(
                    f"'{key}' is not a valid parameter key, "
                    f"consider one of\n{list(old_params.keys())}"
                )
if val is not None:
updated_params[key] = val
return updated_params
# --------------------------------------------------------------------------------
# Others
# --------------------------------------------------------------------------------
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
"""Checks values of X to ensure it is count data"""
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
# Check no negatives
if np.signbit(data).any():
return False
# Check all are integers
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key]."""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""\
Subsample a fraction of 1/subsample samples from the rows of X.
Parameters
----------
X
Data array.
subsample
1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
        # this sequence is defined simply by skipping rows,
        # which is faster than sampling
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Subsample n samples from rows of array.
Parameters
----------
X
Data array.
n
Sample size.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if n < 0:
        raise ValueError('n must not be negative')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
def check_presence_download(filename: Path, backup_url):
"""Check if file is present otherwise download."""
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
"""Imports a module in a way that it’s only executed on member access"""
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
# Make module with proper locking and get it inserted into sys.modules.
loader.exec_module(module)
return module
# --------------------------------------------------------------------------------
# Neighbors
# --------------------------------------------------------------------------------
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
"""Convenience class for accessing neighbors graph representations.
    Allows uniform access to the neighbors' distances, connectivities and
    settings dictionary.
Parameters
----------
adata
AnnData object.
key
This defines where to look for neighbors dictionary,
connectivities, distances.
neigh = NeighborsView(adata, key)
neigh['distances']
neigh['connectivities']
neigh['params']
'connectivities' in neigh
'params' in neigh
is the same as
adata.obsp[adata.uns[key]['distances_key']]
adata.obsp[adata.uns[key]['connectivities_key']]
adata.uns[key]['params']
adata.uns[key]['connectivities_key'] in adata.obsp
'params' in adata.uns[key]
"""
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
# fallback to uns
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
"""Choose connectivities from neighbbors or another obsp column"""
if obsp is not None and neighbors_key is not None:
        raise ValueError(
            "You can't specify both `obsp` and `neighbors_key`. "
            "Please select only one."
        )
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities']
|
backup_models | Save the current best models.
Save the best CER model, the best loss model, and the best WER model. This
occurs at a specified interval.
Args:
i: current iteration number. | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
In this file, we define the classes that live inside 'worker 0', the worker
responsible for orchestration and aggregation. The main class is the
OptimizationServer, which sends clients to the other workers to process and
combines the resulting models.
'''
import json
import logging
import os
import random
import shutil
import time
from collections import defaultdict
import numpy as np
import torch
# Internal imports
from core.globals import TRAINING_FRAMEWORK_TYPE
if TRAINING_FRAMEWORK_TYPE == 'mpi':
import core.federated as federated
else:
raise NotImplementedError('{} is not supported'.format(TRAINING_FRAMEWORK_TYPE))
from core.evaluation import Evaluation
from core.client import Client
from .strategies import select_strategy
from .trainer import (
ModelUpdater,
Trainer,
set_component_wise_lr,
)
from utils import (
get_lr,
print_rank,
update_json_log,
)
# For profiling
import cProfile
import pstats
# AzureML-related libs
from azureml.core import Run
run = Run.get_context()
class OptimizationServer(federated.Server):
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader,
val_dataloader, test_dataloader, config, config_server):
'''Implement Server's orchestration and aggregation.
This is the main Server class, that actually implements orchestration
and aggregation, inheriting from `federated.Server`, which deals with
communication only.
The `train` method is central in FLUTE, as it defines good part of what
happens during training.
Args:
num_clients (int): total available clients.
model (torch.nn.Module): neural network model.
optimizer (torch.optim.Optimizer): optimizer.
ss_scheduler: scheduled sampling scheduler.
data_path (str): points to where data is.
model_path (str): points to where pretrained model is.
train_dataloader (torch.utils.data.DataLoader): dataloader for training
val_dataloader (torch.utils.data.DataLoader): dataloader for validation
test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None
config (dict): JSON style configuration parameters
config_server: deprecated, kept for API compatibility only.
'''
super().__init__()
# Initialize all attributes from arguments
self.client_idx_list = list(range(num_clients))
self.config = config
server_config = config['server_config']
decoder_config = config.get('decoder_config', None)
self.max_iteration = server_config['max_iteration']
self.do_clustering = server_config.get('clustering', False)
self.num_clients_per_iteration = [int(x) for x in server_config['num_clients_per_iteration'].split(',')] \
if isinstance(server_config['num_clients_per_iteration'], str) \
else [server_config['num_clients_per_iteration']]
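        # e.g. '10,20' parses to [10, 20] (train() then draws a random client
        # count in that range each round), while a plain int such as 16 yields
        # the fixed count [16]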
self.val_freq = server_config['val_freq']
self.req_freq = server_config['rec_freq']
self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader)
# TODO: does this need to be adjusted for custom metrics?
self.metrics = {
'best_val_loss': float('inf'),
'best_val_acc': 0.0,
'best_test_loss': float('inf'),
'best_test_acc': 0.0
}
self.model_backup_freq = server_config.get('model_backup_freq', 100)
self.worker_trainer_config = server_config.get('trainer_config', {})
self.aggregate_median = server_config['aggregate_median']
self.initial_lr_client = server_config.get('initial_lr_client', -1.0)
self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0)
self.model_type = config['model_config']['model_type']
self.quant_thresh = config['client_config'].get('quant_thresh', None)
self.quant_bits = config['client_config'].get('quant_bits', 10)
self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data']
self.data_path = data_path
# Get max grad norm from data config
if 'train' in server_config['data_config']:
max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None)
else:
max_grad_norm = None
# Creating an instance to update the model with stats aggregated from workers
self.worker_trainer = ModelUpdater(
model=model,
optimizer=optimizer,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader if train_dataloader is not None else val_dataloader,
val_dataloader=val_dataloader,
max_grad_norm=max_grad_norm,
anneal_config=server_config['annealing_config'],
model_type=self.model_type,
decoder_config=decoder_config
)
self.metrics['worker_trainer'] = self.worker_trainer
# Creating an instance for the server-side trainer (runs mini-batch SGD)
self.server_replay_iterations = None
self.server_trainer = None
if train_dataloader is not None:
assert 'server_replay_config' in server_config, 'server_replay_config is not set'
assert 'optimizer_config' in server_config[
'server_replay_config'], 'server-side replay training optimizer is not set'
self.server_optimizer_config = server_config['server_replay_config']['optimizer_config']
self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {})
self.server_replay_iterations = server_config['server_replay_config']['server_iterations']
self.server_trainer = Trainer(
model=model,
optimizer=None,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader,
server_replay_config=server_config['server_replay_config'],
val_dataloader=None,
max_grad_norm=server_config['server_replay_config']\
.get('max_grad_norm',server_config['data_config']['train']\
.get('max_grad_norm',None)),
anneal_config=server_config['server_replay_config'].get('annealing_config', None)
)
self.skip_model_update = False # will not update the model if True
self.train_loss = 0.0
self.model_path = model_path
self.best_model_criterion = server_config['best_model_criterion']
self.fall_back_to_best_model = server_config['fall_back_to_best_model']
self.last_model_path = os.path.join(self.model_path, 'latest_model.tar')
self.best_model_path = os.path.join(self.model_path,
'best_val_{}_model.tar'.format(self.best_model_criterion))
self.log_path = os.path.join(self.model_path, 'status_log.json')
        self.cur_iter_no = 0  # keep the iteration number for TensorBoard plotting
self.lr_weight = 1.0
self.losses = []
self.no_label_updates = 0 # no. label updates
        # Update the parameters above from the log file if resuming from a checkpoint
if server_config.get('resume_from_checkpoint', False):
self.load_saved_status()
# Decoding config
self.decoder_config = decoder_config
self.spm_model = server_config['data_config']['test'].get('spm_model', None)
self.do_profiling = server_config.get('do_profiling', False)
# Parallel processing
self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None)
StrategyClass = select_strategy(config['strategy'])
self.strategy = StrategyClass('server', self.config, self.model_path)
print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
def load_saved_status(self):
'''Load checkpoint from disk'''
# Check if model is on disk, if so loads it onto trainer
if os.path.exists(self.last_model_path):
print_rank('Resuming from checkpoint model {}'.format(self.last_model_path))
self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True)
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
# Check if log is on disk, if so loads it onto current stats
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as logfp: # loading the iteration no., best loss and CER
elems = json.load(logfp)
self.cur_iter_no = elems.get('i', 0)
self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf'))
self.metrics['best_val_acc'] = elems.get('best_val_acc', 0)
self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf'))
self.metrics['best_test_acc'] = elems.get('best_test_acc', 0)
self.lr_weight = elems.get('weight', 1.0)
self.no_label_updates = elems.get('num_label_updates', 0)
print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
def run(self):
'''Trigger training.
This is a simple wrapper to the `train` method.
'''
print_rank('server started')
self.train()
print_rank('server terminated')
def train(self):
'''Main method for training.'''
self.run_stats = {
'secsPerClientRound': [],
'secsPerClient': [],
'secsPerClientTraining': [],
'secsPerClientSetup': [],
'secsPerClientFull': [],
'secsPerRoundHousekeeping': [],
'secsPerRoundTotal': [],
'mpiCosts': []
}
run.log('Max iterations', self.max_iteration)
try:
            if torch.cuda.is_available():
                self.worker_trainer.model.cuda()
# Do an initial validation round to understand the pretrained model's validation accuracy
# Skip if we resumed from a checkpoint (cur_iter_no > 0)
eval_list = []
if self.cur_iter_no == 0:
if self.config['server_config']['initial_rec']:
eval_list.append('test')
if self.config['server_config']['initial_val']:
eval_list.append('val')
run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
print_rank("Running {} at itr={}".format(eval_list, self.cur_iter_no))
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
eval_list = [] # some cleanup
# Dump all the information in aggregate_metric
print_rank('Saving Model Before Starting Training', loglevel=logging.INFO)
for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']:
self.worker_trainer.save(
model_path=self.model_path,
token=token,
config=self.config['server_config']
)
# Training loop
self.worker_trainer.model.train()
for i in range(self.cur_iter_no, self.max_iteration):
begin = time.time()
metrics_payload = {}
def log_metric(k, v):
metrics_payload[k] = v
print_rank('==== iteration {}'.format(i))
log_metric('Current iteration', i)
# Initial value for the learning rate of the worker
initial_lr = self.initial_lr_client * self.lr_weight
print_rank('Client learning rate {}'.format(initial_lr))
# Run training on clients
self.worker_trainer.model.zero_grad()
self.train_loss = []
server_data = (
initial_lr,
[p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]
)
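                # server_data is sent to every sampled client: the client-side
                # learning rate plus a CPU copy of the current global model weights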
# Random number of clients per iteration
if len(self.num_clients_per_iteration) > 1:
num_clients_curr_iter = random.randint(
self.num_clients_per_iteration[0],
self.num_clients_per_iteration[1]
)
else:
num_clients_curr_iter = self.num_clients_per_iteration[0]
log_metric('Clients for round', num_clients_curr_iter)
# Perform annealing in quantization threshold
if self.quant_thresh is not None:
self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0)
self.quant_thresh = self.config['client_config']['quant_thresh']
log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh'])
# Create the pool of clients -- sample from this pool to assign to workers
sampled_idx_clients = random.sample(self.client_idx_list,
num_clients_curr_iter) if num_clients_curr_iter > 0 else self.client_idx_list
sampled_clients = [
Client(
client_id,
self.config,
self.config['client_config']['type'] == 'optimization',
None
) for client_id in sampled_idx_clients
]
# Initialize stats
clients_begin = time.time()
client_losses = []
client_mag_grads = []
client_mean_grads = []
client_var_grads = []
client_norm_grads = []
self.run_stats['secsPerClient'].append([])
self.run_stats['secsPerClientFull'].append([])
self.run_stats['secsPerClientTraining'].append([])
self.run_stats['secsPerClientSetup'].append([])
self.run_stats['mpiCosts'].append([])
# Check if we want privacy metrics
apply_privacy_metrics = self.config.get('privacy_metrics_config', None) and \
self.config['privacy_metrics_config']['apply_metrics']
adaptive_leakage = apply_privacy_metrics and \
self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)
if apply_privacy_metrics:
privacy_metrics_stats = defaultdict(list)
# Initialize profiler
profiler = None
if self.do_profiling:
profiler = cProfile.Profile()
profiler.enable()
# Reset gradient for the model before assigning the new gradients
self.worker_trainer.model.zero_grad()
for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel):
# Process client output
client_timestamp = client_output['ts']
client_stats = client_output['cs']
client_loss = client_output['tl']
client_mag_grad = client_output['mg']
client_mean_grad = client_output['ng']
client_var_grad = client_output['vg']
client_norm_grad = client_output['rg']
client_payload = client_output['pl']
if apply_privacy_metrics:
privacy_stats = client_output['ps']
for metric, value in privacy_stats.items():
privacy_metrics_stats[metric].append(value)
self.run_stats['mpiCosts'][-1].append(time.time() - client_timestamp)
# Get actual pseudo-gradients for aggregation
payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload)
if not payload_processed:
print_rank('Dropping client', loglevel=logging.DEBUG)
num_clients_curr_iter -= 1
continue
# Aggregate stats
self.train_loss.append(client_loss)
client_losses.append(client_loss)
client_mag_grads.append(client_mag_grad.item())
client_mean_grads.append(client_mean_grad.item())
client_var_grads.append(client_var_grad.item())
client_norm_grads.append(client_norm_grad.item())
# Mark the end of client processing
client_end = time.time()
self.run_stats['secsPerClientFull'][-1].append(client_stats['full cost'])
self.run_stats['secsPerClientTraining'][-1].append(client_stats['training'])
self.run_stats['secsPerClientSetup'][-1].append(client_stats['setup'])
self.run_stats['secsPerClient'][-1].append(client_end - clients_begin)
# Tear down profiler
if self.do_profiling:
profiler.disable()
stats = pstats.Stats(profiler)
stats.sort_stats('cumulative').print_stats()
# Prepare output
client_mag_grads = np.array(client_mag_grads)
client_mean_grads = np.array(client_mean_grads)
client_var_grads = np.array(client_var_grads)
client_norm_grads = np.array(client_norm_grads)
client_stats = (client_mag_grads, client_mean_grads, client_var_grads)
dump_norm_stats = self.config.get('dump_norm_stats', False)
if dump_norm_stats:
with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF:
outF.write('{}\n'.format(json.dumps(list(client_norm_grads))))
# Print the privacy metrics
if apply_privacy_metrics:
for metric, values in privacy_metrics_stats.items():
if metric == 'Dropped clients':
log_metric(metric, sum(values))
else:
log_metric(metric, max(values))
                    if isinstance(adaptive_leakage, float):
                        values = privacy_metrics_stats['Practical epsilon (Max leakage)']
                        new_threshold = sorted(values)[int(adaptive_leakage * len(values))]
print_rank('Updating leakage threshold to {}'.format(new_threshold))
self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold
# Mark that all clients have been processed
end = time.time()
self.run_stats['secsPerClientRound'].append(end - begin)
begin = end
# Log the training loss to tensorboard/AML
log_metric('Training loss', sum(self.train_loss))
# Combine payloads
self.losses = self.strategy.combine_payloads(
worker_trainer=self.worker_trainer,
curr_iter=i,
num_clients_curr_iter=num_clients_curr_iter,
client_stats=client_stats,
logger=log_metric,
)
# Run a couple of iterations of training data on the server
if self.server_trainer is not None:
print_rank('Running replay iterations on server')
if 'updatable_names' in self.server_trainer_config:
set_component_wise_lr(
self.worker_trainer.model,
self.server_optimizer_config,
self.server_trainer_config['updatable_names']
)
self.server_trainer.prepare_iteration(self.worker_trainer.model)
self.server_trainer.train_desired_samples(self.server_replay_iterations)
self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict())
torch.cuda.empty_cache()
# Update a sampling scheduler
print_rank('Run ss scheduler')
self.worker_trainer.run_ss_scheduler()
# Run inference and score on val/test depending on the iter. number
                if ((i+1) % self.val_freq) == 0:
                    eval_list.append("val")
                if ((i+1) % self.req_freq) == 0:
                    eval_list.append("test")
                ran_validation = "val" in eval_list
                if len(eval_list) > 0:
                    print_rank('Running {} at itr={}'.format(eval_list, i+1))
                    self.metrics['worker_trainer'] = self.worker_trainer
                    self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
                    self.losses = self.evaluation.losses
                    eval_list = []
                # Create a schedule for the initial_lr (for the worker); note that
                # the validation flag was captured before eval_list was cleared
                if ran_validation:
                    run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
                    if not (self.losses[0] < self.metrics['best_val_loss']):
                        self.lr_weight *= self.lr_decay_factor
                        print_rank('LOG: client learning-rate weight {}'.format(self.lr_weight))
# Backup the current best models
self.backup_models(i)
# Fall back to the best model if the option is enabled
self.fall_back_to_prev_best_status()
# Logging the latest best values
update_json_log(
self.log_path,
{
'i': i + 1,
'best_val_loss': float(self.metrics['best_val_loss']),
'best_val_acc': float(self.metrics['best_val_acc']),
'best_test_loss': float(self.metrics['best_test_loss']),
'best_test_acc': float(self.metrics['best_test_acc']),
'weight': float(self.lr_weight),
'num_label_updates': int(self.no_label_updates)
},
)
end = time.time()
# Aggregate stats
self.run_stats['secsPerRoundHousekeeping'].append(end - begin)
self.run_stats['secsPerRoundTotal'].append(self.run_stats['secsPerClientRound'][-1] + \
self.run_stats['secsPerRoundHousekeeping'][-1])
log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][-1])
if self.do_profiling:
log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][-1])
log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][-1])
metrics_for_stats = [
'secsPerClient',
'secsPerClientTraining',
'secsPerClientFull',
'secsPerClientSetup',
'mpiCosts',
]
for metric in metrics_for_stats:
log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][-1]))
log_metric(f'{metric}Median', np.median(self.run_stats[metric][-1]))
log_metric(f'{metric}Max', max(self.run_stats[metric][-1]))
for k in self.run_stats:
if k in metrics_for_stats:
print_rank('{}: {}'.format(k, max(self.run_stats[k][-1])), loglevel=logging.DEBUG)
else:
print_rank('{}: {}'.format(k, self.run_stats[k][-1]), loglevel=logging.DEBUG)
# Log all the metrics
for k in metrics_payload:
run.log(k, metrics_payload[k])
finally: # perform cleanup even if error was raised above
self.terminate_workers(terminate=(not self.do_clustering))
# MASKED: backup_models function (lines 539-568)
def fall_back_to_prev_best_status(self):
'''Go back to the past best status and switch to the recent best model.'''
if self.fall_back_to_best_model:
print_rank('falling back to model {}'.format(self.best_model_path))
# Save current learning rate
tmp_lr = get_lr(self.worker_trainer.optimizer)
# Load previous best model
self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False)
# Update previous learning rate on optimizer
for g in self.worker_trainer.optimizer.param_groups:
g['lr'] = tmp_lr
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
def select_server(server_type, config):
'''Select a server type using different possible strings.
Right now this just returns `OptimizationServer`, but this
function could be useful when there are multiple choices of
server.
Args:
server_type (str): indicates server choice.
config (dict): config parsed from YAML, passed so that
parameters can be used to select a given server.
'''
return OptimizationServer | def backup_models(self, i):
'''Save the current best models.
        Save the latest model and, at a specified period, snapshot the current
        best models (best validation loss/accuracy and best test accuracy).
Args:
i: no. of iterations.
'''
# Always save the latest model
self.worker_trainer.save(
model_path=self.model_path,
token='latest',
config=self.config['server_config'],
)
if (i % self.model_backup_freq) == 0: # save the current best models
self.worker_trainer.save(
model_path=self.model_path,
token='epoch{}'.format(i),
config=self.config['server_config']
)
for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']:
src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname))
if os.path.exists(src_model_path):
dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname))
shutil.copyfile(src_model_path, dst_model_path)
print_rank('Saved {}'.format(dst_model_path)) | 539 | 568 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
In this file, we define the classes that live inside 'worker 0', the worker
responsible for orchestration and aggregation. The main class is the
OptimizationServer, which sends clients to the other workers to process and
combines the resulting models.
'''
import json
import logging
import os
import random
import shutil
import time
from collections import defaultdict
import numpy as np
import torch
# Internal imports
from core.globals import TRAINING_FRAMEWORK_TYPE
if TRAINING_FRAMEWORK_TYPE == 'mpi':
import core.federated as federated
else:
raise NotImplementedError('{} is not supported'.format(TRAINING_FRAMEWORK_TYPE))
from core.evaluation import Evaluation
from core.client import Client
from .strategies import select_strategy
from .trainer import (
ModelUpdater,
Trainer,
set_component_wise_lr,
)
from utils import (
get_lr,
print_rank,
update_json_log,
)
# For profiling
import cProfile
import pstats
# AzureML-related libs
from azureml.core import Run
run = Run.get_context()
class OptimizationServer(federated.Server):
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader,
val_dataloader, test_dataloader, config, config_server):
'''Implement Server's orchestration and aggregation.
This is the main Server class, that actually implements orchestration
and aggregation, inheriting from `federated.Server`, which deals with
communication only.
The `train` method is central in FLUTE, as it defines good part of what
happens during training.
Args:
num_clients (int): total available clients.
model (torch.nn.Module): neural network model.
optimizer (torch.optim.Optimizer): optimizer.
ss_scheduler: scheduled sampling scheduler.
data_path (str): points to where data is.
model_path (str): points to where pretrained model is.
train_dataloader (torch.utils.data.DataLoader): dataloader for training
val_dataloader (torch.utils.data.DataLoader): dataloader for validation
test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None
config (dict): JSON style configuration parameters
config_server: deprecated, kept for API compatibility only.
'''
super().__init__()
# Initialize all attributes from arguments
self.client_idx_list = list(range(num_clients))
self.config = config
server_config = config['server_config']
decoder_config = config.get('decoder_config', None)
self.max_iteration = server_config['max_iteration']
self.do_clustering = server_config.get('clustering', False)
self.num_clients_per_iteration = [int(x) for x in server_config['num_clients_per_iteration'].split(',')] \
if isinstance(server_config['num_clients_per_iteration'], str) \
else [server_config['num_clients_per_iteration']]
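        # e.g. '10,20' parses to [10, 20] (train() then draws a random client
        # count in that range each round), while a plain int such as 16 yields
        # the fixed count [16]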
self.val_freq = server_config['val_freq']
self.req_freq = server_config['rec_freq']
self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader)
# TODO: does this need to be adjusted for custom metrics?
self.metrics = {
'best_val_loss': float('inf'),
'best_val_acc': 0.0,
'best_test_loss': float('inf'),
'best_test_acc': 0.0
}
self.model_backup_freq = server_config.get('model_backup_freq', 100)
self.worker_trainer_config = server_config.get('trainer_config', {})
self.aggregate_median = server_config['aggregate_median']
self.initial_lr_client = server_config.get('initial_lr_client', -1.0)
self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0)
self.model_type = config['model_config']['model_type']
self.quant_thresh = config['client_config'].get('quant_thresh', None)
self.quant_bits = config['client_config'].get('quant_bits', 10)
self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data']
self.data_path = data_path
# Get max grad norm from data config
if 'train' in server_config['data_config']:
max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None)
else:
max_grad_norm = None
# Creating an instance to update the model with stats aggregated from workers
self.worker_trainer = ModelUpdater(
model=model,
optimizer=optimizer,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader if train_dataloader is not None else val_dataloader,
val_dataloader=val_dataloader,
max_grad_norm=max_grad_norm,
anneal_config=server_config['annealing_config'],
model_type=self.model_type,
decoder_config=decoder_config
)
self.metrics['worker_trainer'] = self.worker_trainer
# Creating an instance for the server-side trainer (runs mini-batch SGD)
self.server_replay_iterations = None
self.server_trainer = None
if train_dataloader is not None:
assert 'server_replay_config' in server_config, 'server_replay_config is not set'
assert 'optimizer_config' in server_config[
'server_replay_config'], 'server-side replay training optimizer is not set'
self.server_optimizer_config = server_config['server_replay_config']['optimizer_config']
self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {})
self.server_replay_iterations = server_config['server_replay_config']['server_iterations']
self.server_trainer = Trainer(
model=model,
optimizer=None,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader,
server_replay_config=server_config['server_replay_config'],
val_dataloader=None,
max_grad_norm=server_config['server_replay_config']\
.get('max_grad_norm',server_config['data_config']['train']\
.get('max_grad_norm',None)),
anneal_config=server_config['server_replay_config'].get('annealing_config', None)
)
self.skip_model_update = False # will not update the model if True
self.train_loss = 0.0
self.model_path = model_path
self.best_model_criterion = server_config['best_model_criterion']
self.fall_back_to_best_model = server_config['fall_back_to_best_model']
self.last_model_path = os.path.join(self.model_path, 'latest_model.tar')
self.best_model_path = os.path.join(self.model_path,
'best_val_{}_model.tar'.format(self.best_model_criterion))
self.log_path = os.path.join(self.model_path, 'status_log.json')
        self.cur_iter_no = 0  # keep the iteration number for TensorBoard plotting
self.lr_weight = 1.0
self.losses = []
self.no_label_updates = 0 # no. label updates
        # Update the parameters above from the log file if resuming from a checkpoint
if server_config.get('resume_from_checkpoint', False):
self.load_saved_status()
# Decoding config
self.decoder_config = decoder_config
self.spm_model = server_config['data_config']['test'].get('spm_model', None)
self.do_profiling = server_config.get('do_profiling', False)
# Parallel processing
self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None)
StrategyClass = select_strategy(config['strategy'])
self.strategy = StrategyClass('server', self.config, self.model_path)
print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
def load_saved_status(self):
'''Load checkpoint from disk'''
# Check if model is on disk, if so loads it onto trainer
if os.path.exists(self.last_model_path):
print_rank('Resuming from checkpoint model {}'.format(self.last_model_path))
self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True)
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
# Check if log is on disk, if so loads it onto current stats
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as logfp: # loading the iteration no., best loss and CER
elems = json.load(logfp)
self.cur_iter_no = elems.get('i', 0)
self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf'))
self.metrics['best_val_acc'] = elems.get('best_val_acc', 0)
self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf'))
self.metrics['best_test_acc'] = elems.get('best_test_acc', 0)
self.lr_weight = elems.get('weight', 1.0)
self.no_label_updates = elems.get('num_label_updates', 0)
print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
def run(self):
'''Trigger training.
This is a simple wrapper to the `train` method.
'''
print_rank('server started')
self.train()
print_rank('server terminated')
def train(self):
'''Main method for training.'''
self.run_stats = {
'secsPerClientRound': [],
'secsPerClient': [],
'secsPerClientTraining': [],
'secsPerClientSetup': [],
'secsPerClientFull': [],
'secsPerRoundHousekeeping': [],
'secsPerRoundTotal': [],
'mpiCosts': []
}
run.log('Max iterations', self.max_iteration)
try:
            if torch.cuda.is_available():
                self.worker_trainer.model.cuda()
# Do an initial validation round to understand the pretrained model's validation accuracy
# Skip if we resumed from a checkpoint (cur_iter_no > 0)
eval_list = []
if self.cur_iter_no == 0:
if self.config['server_config']['initial_rec']:
eval_list.append('test')
if self.config['server_config']['initial_val']:
eval_list.append('val')
run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
print_rank("Running {} at itr={}".format(eval_list, self.cur_iter_no))
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
eval_list = [] # some cleanup
# Dump all the information in aggregate_metric
print_rank('Saving Model Before Starting Training', loglevel=logging.INFO)
for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']:
self.worker_trainer.save(
model_path=self.model_path,
token=token,
config=self.config['server_config']
)
# Training loop
self.worker_trainer.model.train()
for i in range(self.cur_iter_no, self.max_iteration):
begin = time.time()
metrics_payload = {}
def log_metric(k, v):
metrics_payload[k] = v
print_rank('==== iteration {}'.format(i))
log_metric('Current iteration', i)
# Initial value for the learning rate of the worker
initial_lr = self.initial_lr_client * self.lr_weight
print_rank('Client learning rate {}'.format(initial_lr))
# Run training on clients
self.worker_trainer.model.zero_grad()
self.train_loss = []
server_data = (
initial_lr,
[p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]
)
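                # server_data is sent to every sampled client: the client-side
                # learning rate plus a CPU copy of the current global model weights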
# Random number of clients per iteration
if len(self.num_clients_per_iteration) > 1:
num_clients_curr_iter = random.randint(
self.num_clients_per_iteration[0],
self.num_clients_per_iteration[1]
)
else:
num_clients_curr_iter = self.num_clients_per_iteration[0]
log_metric('Clients for round', num_clients_curr_iter)
# Perform annealing in quantization threshold
if self.quant_thresh is not None:
self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0)
self.quant_thresh = self.config['client_config']['quant_thresh']
log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh'])
# Create the pool of clients -- sample from this pool to assign to workers
sampled_idx_clients = random.sample(self.client_idx_list,
num_clients_curr_iter) if num_clients_curr_iter > 0 else self.client_idx_list
sampled_clients = [
Client(
client_id,
self.config,
self.config['client_config']['type'] == 'optimization',
None
) for client_id in sampled_idx_clients
]
# Initialize stats
clients_begin = time.time()
client_losses = []
client_mag_grads = []
client_mean_grads = []
client_var_grads = []
client_norm_grads = []
self.run_stats['secsPerClient'].append([])
self.run_stats['secsPerClientFull'].append([])
self.run_stats['secsPerClientTraining'].append([])
self.run_stats['secsPerClientSetup'].append([])
self.run_stats['mpiCosts'].append([])
# Check if we want privacy metrics
apply_privacy_metrics = self.config.get('privacy_metrics_config', None) and \
self.config['privacy_metrics_config']['apply_metrics']
adaptive_leakage = apply_privacy_metrics and \
self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)
if apply_privacy_metrics:
privacy_metrics_stats = defaultdict(list)
# Initialize profiler
profiler = None
if self.do_profiling:
profiler = cProfile.Profile()
profiler.enable()
# Reset gradient for the model before assigning the new gradients
self.worker_trainer.model.zero_grad()
for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel):
# Process client output
client_timestamp = client_output['ts']
client_stats = client_output['cs']
client_loss = client_output['tl']
client_mag_grad = client_output['mg']
client_mean_grad = client_output['ng']
client_var_grad = client_output['vg']
client_norm_grad = client_output['rg']
client_payload = client_output['pl']
if apply_privacy_metrics:
privacy_stats = client_output['ps']
for metric, value in privacy_stats.items():
privacy_metrics_stats[metric].append(value)
self.run_stats['mpiCosts'][-1].append(time.time() - client_timestamp)
# Get actual pseudo-gradients for aggregation
payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload)
if not payload_processed:
print_rank('Dropping client', loglevel=logging.DEBUG)
num_clients_curr_iter -= 1
continue
# Aggregate stats
self.train_loss.append(client_loss)
client_losses.append(client_loss)
client_mag_grads.append(client_mag_grad.item())
client_mean_grads.append(client_mean_grad.item())
client_var_grads.append(client_var_grad.item())
client_norm_grads.append(client_norm_grad.item())
# Mark the end of client processing
client_end = time.time()
self.run_stats['secsPerClientFull'][-1].append(client_stats['full cost'])
self.run_stats['secsPerClientTraining'][-1].append(client_stats['training'])
self.run_stats['secsPerClientSetup'][-1].append(client_stats['setup'])
self.run_stats['secsPerClient'][-1].append(client_end - clients_begin)
# Tear down profiler
if self.do_profiling:
profiler.disable()
stats = pstats.Stats(profiler)
stats.sort_stats('cumulative').print_stats()
# Prepare output
client_mag_grads = np.array(client_mag_grads)
client_mean_grads = np.array(client_mean_grads)
client_var_grads = np.array(client_var_grads)
client_norm_grads = np.array(client_norm_grads)
client_stats = (client_mag_grads, client_mean_grads, client_var_grads)
dump_norm_stats = self.config.get('dump_norm_stats', False)
if dump_norm_stats:
with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF:
outF.write('{}\n'.format(json.dumps(list(client_norm_grads))))
# Print the privacy metrics
if apply_privacy_metrics:
for metric, values in privacy_metrics_stats.items():
if metric == 'Dropped clients':
log_metric(metric, sum(values))
else:
log_metric(metric, max(values))
                    if isinstance(adaptive_leakage, float):
                        values = privacy_metrics_stats['Practical epsilon (Max leakage)']
                        new_threshold = sorted(values)[int(adaptive_leakage * len(values))]
print_rank('Updating leakage threshold to {}'.format(new_threshold))
self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold
# Mark that all clients have been processed
end = time.time()
self.run_stats['secsPerClientRound'].append(end - begin)
begin = end
# Log the training loss to tensorboard/AML
log_metric('Training loss', sum(self.train_loss))
# Combine payloads
self.losses = self.strategy.combine_payloads(
worker_trainer=self.worker_trainer,
curr_iter=i,
num_clients_curr_iter=num_clients_curr_iter,
client_stats=client_stats,
logger=log_metric,
)
# Run a couple of iterations of training data on the server
if self.server_trainer is not None:
print_rank('Running replay iterations on server')
if 'updatable_names' in self.server_trainer_config:
set_component_wise_lr(
self.worker_trainer.model,
self.server_optimizer_config,
self.server_trainer_config['updatable_names']
)
self.server_trainer.prepare_iteration(self.worker_trainer.model)
self.server_trainer.train_desired_samples(self.server_replay_iterations)
self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict())
torch.cuda.empty_cache()
# Update a sampling scheduler
print_rank('Run ss scheduler')
self.worker_trainer.run_ss_scheduler()
# Run inference and score on val/test depending on the iter. number
                if ((i+1) % self.val_freq) == 0:
                    eval_list.append("val")
                if ((i+1) % self.req_freq) == 0:
                    eval_list.append("test")
                ran_validation = "val" in eval_list
                if len(eval_list) > 0:
                    print_rank('Running {} at itr={}'.format(eval_list, i+1))
                    self.metrics['worker_trainer'] = self.worker_trainer
                    self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
                    self.losses = self.evaluation.losses
                    eval_list = []
                # Create a schedule for the initial_lr (for the worker); note that
                # the validation flag was captured before eval_list was cleared
                if ran_validation:
                    run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
                    if not (self.losses[0] < self.metrics['best_val_loss']):
                        self.lr_weight *= self.lr_decay_factor
                        print_rank('LOG: client learning-rate weight {}'.format(self.lr_weight))
# Backup the current best models
self.backup_models(i)
# Fall back to the best model if the option is enabled
self.fall_back_to_prev_best_status()
# Logging the latest best values
update_json_log(
self.log_path,
{
'i': i + 1,
'best_val_loss': float(self.metrics['best_val_loss']),
'best_val_acc': float(self.metrics['best_val_acc']),
'best_test_loss': float(self.metrics['best_test_loss']),
'best_test_acc': float(self.metrics['best_test_acc']),
'weight': float(self.lr_weight),
'num_label_updates': int(self.no_label_updates)
},
)
end = time.time()
# Aggregate stats
self.run_stats['secsPerRoundHousekeeping'].append(end - begin)
self.run_stats['secsPerRoundTotal'].append(self.run_stats['secsPerClientRound'][-1] + \
self.run_stats['secsPerRoundHousekeeping'][-1])
log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][-1])
if self.do_profiling:
log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][-1])
log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][-1])
metrics_for_stats = [
'secsPerClient',
'secsPerClientTraining',
'secsPerClientFull',
'secsPerClientSetup',
'mpiCosts',
]
for metric in metrics_for_stats:
log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][-1]))
log_metric(f'{metric}Median', np.median(self.run_stats[metric][-1]))
log_metric(f'{metric}Max', max(self.run_stats[metric][-1]))
for k in self.run_stats:
if k in metrics_for_stats:
print_rank('{}: {}'.format(k, max(self.run_stats[k][-1])), loglevel=logging.DEBUG)
else:
print_rank('{}: {}'.format(k, self.run_stats[k][-1]), loglevel=logging.DEBUG)
# Log all the metrics
for k in metrics_payload:
run.log(k, metrics_payload[k])
finally: # perform cleanup even if error was raised above
self.terminate_workers(terminate=(not self.do_clustering))
def backup_models(self, i):
'''Save the current best models.
        Save the latest model and, at a specified period, snapshot the current
        best models (best validation loss/accuracy and best test accuracy).
Args:
i: no. of iterations.
'''
# Always save the latest model
self.worker_trainer.save(
model_path=self.model_path,
token='latest',
config=self.config['server_config'],
)
if (i % self.model_backup_freq) == 0: # save the current best models
self.worker_trainer.save(
model_path=self.model_path,
token='epoch{}'.format(i),
config=self.config['server_config']
)
for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']:
src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname))
if os.path.exists(src_model_path):
dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname))
shutil.copyfile(src_model_path, dst_model_path)
print_rank('Saved {}'.format(dst_model_path))
def fall_back_to_prev_best_status(self):
'''Go back to the past best status and switch to the recent best model.'''
if self.fall_back_to_best_model:
print_rank('falling back to model {}'.format(self.best_model_path))
# Save current learning rate
tmp_lr = get_lr(self.worker_trainer.optimizer)
# Load previous best model
self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False)
# Update previous learning rate on optimizer
for g in self.worker_trainer.optimizer.param_groups:
g['lr'] = tmp_lr
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
def select_server(server_type, config):
'''Select a server type using different possible strings.
Right now this just returns `OptimizationServer`, but this
function could be useful when there are multiple choices of
server.
Args:
server_type (str): indicates server choice.
config (dict): config parsed from YAML, passed so that
parameters can be used to select a given server.
'''
return OptimizationServer
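# Illustrative sketch (not from the original file): a minimal server_config
# covering the keys that OptimizationServer.__init__ and train() read above.
# All values below are made-up placeholders.
example_server_config = {
    'max_iteration': 2000,
    'num_clients_per_iteration': '10,20',  # or a single int for a fixed count
    'val_freq': 25,                        # validate every 25 rounds
    'rec_freq': 100,                       # test every 100 rounds
    'initial_val': True,
    'initial_rec': False,
    'aggregate_median': 'softmax',
    'best_model_criterion': 'loss',
    'fall_back_to_best_model': False,
    'initial_lr_client': 1.0,
    'lr_decay_factor': 0.9,
    'model_backup_freq': 100,
    'annealing_config': {},                # schema consumed by ModelUpdater
    'data_config': {'train': {'max_grad_norm': 10.0}, 'test': {}},
}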
|
approximate_predict | Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
the original data under HDBSCAN the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors` | # Support various prediction methods for predicting cluster membership
# of new or unseen points. There are several ways to interpret how
# to do this correctly, so we provide several methods for
# the different use cases that may arise.
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
"""
Extra data that allows for faster prediction if cached.
Parameters
----------
data : array (n_samples, n_features)
The original data set that was clustered
condensed_tree : CondensedTree
The condensed tree object created by a clustering
min_samples : int
The min_samples value used in clustering
tree_type : string, optional
Which type of space tree to use for core distance computation.
One of:
* ``kdtree``
* ``balltree``
metric : string, optional
The metric used to determine distance for the clustering.
This is the metric that will be used for the space tree to determine
core distances etc.
**kwargs :
Any further arguments to the metric.
Attributes
----------
raw_data : array (n_samples, n_features)
The original data set that was clustered
tree : KDTree or BallTree
A space partitioning tree that can be queried for nearest neighbors.
core_distances : array (n_samples,)
The core distances for every point in the original data set.
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
cluster_tree : structured array
A version of the condensed tree that only contains clusters, not
individual points.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
"""
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
self.cluster_tree['child'][np.in1d(self.cluster_tree['parent'],
to_process)]
to_process = to_process.tolist()
return result
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
# raw_condensed_tree = condensed_tree.to_numpy()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
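        # self.exemplars now holds, for each selected cluster, the raw data
        # points that persisted to each leaf's maximum lambda value -- the most
        # "central" members, used later for distance-based soft membership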
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
"""
Return the cluster label (of the original clustering) and membership
probability of a new data point.
Parameters
----------
tree : CondensedTree
The condensed tree associated with the clustering.
cluster_tree : structured_array
The raw form of the condensed tree with only cluster information (no
data on individual points). This is significantly more compact.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
min_samples : int
The min_samples value used to generate core distances.
"""
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
# MASKED: approximate_predict function (lines 334-413)
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
original dataset the clusterer was trained on. This function is more
efficient by making use of the fact that all points are already in the
condensed tree, and processing in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
    :py:func:`hdbscan.predict.membership_vector`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result | def approximate_predict(clusterer, points_to_predict):
"""Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
the original data under HDBSCAN the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities | 334 | 413 | # Support various prediction methods for predicting cluster membership
# of new or unseen points. There are several ways to interpret how
# to do this correctly, so we provide several methods for
# the different use cases that may arise.
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
"""
Extra data that allows for faster prediction if cached.
Parameters
----------
data : array (n_samples, n_features)
The original data set that was clustered
condensed_tree : CondensedTree
The condensed tree object created by a clustering
min_samples : int
The min_samples value used in clustering
tree_type : string, optional
Which type of space tree to use for core distance computation.
One of:
* ``kdtree``
* ``balltree``
metric : string, optional
The metric used to determine distance for the clustering.
This is the metric that will be used for the space tree to determine
core distances etc.
**kwargs :
Any further arguments to the metric.
Attributes
----------
raw_data : array (n_samples, n_features)
The original data set that was clustered
tree : KDTree or BallTree
A space partitioning tree that can be queried for nearest neighbors.
core_distances : array (n_samples,)
The core distances for every point in the original data set.
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
cluster_tree : structured array
A version of the condensed tree that only contains clusters, not
individual points.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
"""
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
                self.cluster_tree['child'][np.isin(self.cluster_tree['parent'],
                                                   to_process)]
to_process = to_process.tolist()
return result
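    # Illustrative note (not from the original source): for hypothetical
    # cluster_tree rows with (parent, child) pairs (1, 2), (1, 3), (3, 4),
    # _clusters_below(1) gathers clusters level by level and returns
    # [1, 2, 3, 4] -- the cluster itself plus every descendant cluster.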
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
# raw_condensed_tree = condensed_tree.to_numpy()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
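# Construction sketch (hedged): PredictionData is normally created
# internally by HDBSCAN when ``prediction_data=True`` is set, roughly
# along these lines (`X` and `clusterer` are hypothetical placeholders):
#
#   pdata = PredictionData(X, clusterer.condensed_tree_,
#                          clusterer.min_samples or clusterer.min_cluster_size,
#                          tree_type='kdtree', metric='euclidean')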
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
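# Minimal sketch (not part of hdbscan) of the mutual reachability idea used
# above: the mutual reachability distance between two points is
# max(core_a, core_b, d(a, b)), and the lambda value at which a point joins
# the tree is its reciprocal. All names below are hypothetical.
def _mutual_reachability_sketch(core_a, core_b, raw_distance):
    mr = max(core_a, core_b, raw_distance)
    return 1.0 / mr if mr > 0.0 else np.finfo(np.double).max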
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
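# Worked example (hypothetical numbers): if the nearest mutual reachability
# neighbor's row is (parent=7, child=n, child_size=1, lambda_val=0.9) and
# the new point's lambda_ is 1.2 (>= 0.9), the point "departs with the old"
# and the appended row is (7, -1, 1, 0.9); with lambda_ = 0.5 the while
# loop instead walks up toward the root to find a cluster that already
# exists at that lower lambda value.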
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
"""
Return the cluster label (of the original clustering) and membership
probability of a new data point.
Parameters
----------
tree : CondensedTree
The condensed tree associated with the clustering.
cluster_tree : structured_array
The raw form of the condensed tree with only cluster information (no
data on individual points). This is significantly more compact.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
min_samples : int
The min_samples value used to generate core distances.
"""
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
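# Worked example (hypothetical numbers): if the matched cluster has
# max_lambda = 2.0 and the new point joins at lambda_ = 1.5, the membership
# probability is min(2.0, 1.5) / 2.0 = 0.75; a point that falls outside the
# selected clusters gets label -1 and probability 0.0.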
def approximate_predict(clusterer, points_to_predict):
"""Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
    the original data under HDBSCAN, the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities
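# Usage sketch (hedged; ``data`` and ``new_points`` are placeholder arrays,
# not defined in this module):
#
#   import hdbscan
#   clusterer = hdbscan.HDBSCAN(min_cluster_size=15,
#                               prediction_data=True).fit(data)
#   labels, probabilities = approximate_predict(clusterer, new_points)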
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
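# Usage sketch (hedged; ``new_points`` is a placeholder array): each row of
# the result is a soft assignment over the selected clusters, so a hard
# label can be recovered with an argmax:
#
#   vectors = membership_vector(clusterer, new_points)
#   hard_labels = np.argmax(vectors, axis=1)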
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than calling ``membership_vector`` point by point, since all
    points are already in the condensed tree and can be processed in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
    :py:func:`hdbscan.predict.membership_vector`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result
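# Usage sketch (hedged): for the training data itself this bulk routine
# replaces a loop of per-point membership_vector calls:
#
#   membership = all_points_membership_vectors(clusterer)
#   soft_labels = np.argmax(membership, axis=1)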
|
all_points_membership_vectors | Predict soft cluster membership vectors for all points in the
original dataset the clusterer was trained on. This function is more
efficient than calling ``membership_vector`` point by point, since all
points are already in the condensed tree and can be processed in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.approximate_predict`
:py:func:`hdbscan.predict.membership_vector` | # Support various prediction methods for predicting cluster membership
# of new or unseen points. There are several ways to interpret how
# to do this correctly, so we provide several methods for
# the different use cases that may arise.
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
"""
Extra data that allows for faster prediction if cached.
Parameters
----------
data : array (n_samples, n_features)
The original data set that was clustered
condensed_tree : CondensedTree
The condensed tree object created by a clustering
min_samples : int
The min_samples value used in clustering
tree_type : string, optional
Which type of space tree to use for core distance computation.
One of:
* ``kdtree``
* ``balltree``
metric : string, optional
The metric used to determine distance for the clustering.
This is the metric that will be used for the space tree to determine
core distances etc.
**kwargs :
Any further arguments to the metric.
Attributes
----------
raw_data : array (n_samples, n_features)
The original data set that was clustered
tree : KDTree or BallTree
A space partitioning tree that can be queried for nearest neighbors.
core_distances : array (n_samples,)
The core distances for every point in the original data set.
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
cluster_tree : structured array
A version of the condensed tree that only contains clusters, not
individual points.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
"""
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
                self.cluster_tree['child'][np.isin(self.cluster_tree['parent'],
                                                   to_process)]
to_process = to_process.tolist()
return result
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
# raw_condensed_tree = condensed_tree.to_numpy()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
"""
Return the cluster label (of the original clustering) and membership
probability of a new data point.
Parameters
----------
tree : CondensedTree
The condensed tree associated with the clustering.
cluster_tree : structured_array
The raw form of the condensed tree with only cluster information (no
data on individual points). This is significantly more compact.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
min_samples : int
The min_samples value used to generate core distances.
"""
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
def approximate_predict(clusterer, points_to_predict):
"""Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
    the original data under HDBSCAN, the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
# MASKED: all_points_membership_vectors function (lines 500-553) | def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than calling ``membership_vector`` point by point, since all
    points are already in the condensed tree and can be processed in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
    :py:func:`hdbscan.predict.membership_vector`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result | 500 | 553 | # Support various prediction methods for predicting cluster membership
# of new or unseen points. There are several ways to interpret how
# to do this correctly, so we provide several methods for
# the different use cases that may arise.
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
"""
Extra data that allows for faster prediction if cached.
Parameters
----------
data : array (n_samples, n_features)
The original data set that was clustered
condensed_tree : CondensedTree
The condensed tree object created by a clustering
min_samples : int
The min_samples value used in clustering
tree_type : string, optional
Which type of space tree to use for core distance computation.
One of:
* ``kdtree``
* ``balltree``
metric : string, optional
The metric used to determine distance for the clustering.
This is the metric that will be used for the space tree to determine
core distances etc.
**kwargs :
Any further arguments to the metric.
Attributes
----------
raw_data : array (n_samples, n_features)
The original data set that was clustered
tree : KDTree or BallTree
A space partitioning tree that can be queried for nearest neighbors.
core_distances : array (n_samples,)
The core distances for every point in the original data set.
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
cluster_tree : structured array
A version of the condensed tree that only contains clusters, not
individual points.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
"""
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
                self.cluster_tree['child'][np.isin(self.cluster_tree['parent'],
                                                   to_process)]
to_process = to_process.tolist()
return result
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
# raw_condensed_tree = condensed_tree.to_numpy()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
"""
Return the cluster label (of the original clustering) and membership
probability of a new data point.
Parameters
----------
tree : CondensedTree
The condensed tree associated with the clustering.
cluster_tree : structured_array
The raw form of the condensed tree with only cluster information (no
data on individual points). This is significantly more compact.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
min_samples : int
The min_samples value used to generate core distances.
"""
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
def approximate_predict(clusterer, points_to_predict):
"""Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
    the original data under HDBSCAN, the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than calling ``membership_vector`` point by point, since all
    points are already in the condensed tree and can be processed in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
    :py:func:`hdbscan.predict.membership_vector`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result
|
normalize | Normalize text.
Args:
text (str): text to be normalized | import re
class Normalizer:
"""Normalizer return the text replaced with 'repl'.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
Args:
url_repl (str): replace all urls in text with this
tag_repl (str): replace all tags in text with this
emoji_repl (str): replace all emojis in text with this
email_repl (str): replace all emails in text with this
tel_repl (str): replace all tels in text with this
"""
def __init__(self, url_repl='[URL]', tag_repl='[TAG]', emoji_repl='[EMOJI]', email_repl='[EMAIL]', tel_repl='[TEL]'):
# repls
self.url_repl = url_repl
self.tag_repl = tag_repl
self.emoji_repl = emoji_repl
self.email_repl = email_repl
self.tel_repl = tel_repl
self._normalize = []
self._init_normalize()
# MASKED: normalize function (lines 25-34)
def _init_normalize(self) -> None:
"""Initialize normalize function.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
"""
if self.url_repl is not None:
self._normalize.append((self._url_normalize, self.url_repl))
if self.tag_repl is not None:
self._normalize.append((self._tag_normalize, self.tag_repl))
if self.emoji_repl is not None:
self._normalize.append((self._emoji_normalize, self.emoji_repl))
if self.email_repl is not None:
self._normalize.append((self._email_normalize, self.email_repl))
if self.tel_repl is not None:
self._normalize.append((self._tel_normalize, self.tel_repl))
def _url_normalize(self, text: str, repl: str, regex=re.compile(r'(https?|ftp|www)\S+')) -> str:
"""Return the string obtained by replacing all urls in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all urls in text with 'repl'
"""
text = regex.sub(repl, text)
return text
def _tag_normalize(self, text: str, repl: str, regex=re.compile(r'<[^>]*>')) -> str:
"""Return the string obtained by replacing all HTML tags in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all HTML tags in text with 'repl'
"""
text = regex.sub(repl, text)
return text
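    # Usage sketch (hedged; kept as a comment so nothing executes inside
    # the class body, and assuming the masked ``normalize`` method applies
    # each configured replacement in turn):
    #   norm = Normalizer()
    #   norm.normalize('visit https://example.com now')  # -> 'visit [URL] now'
    #   norm.normalize('<b>bold</b>')                    # -> '[TAG]bold[TAG]'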
    def _emoji_normalize(self, text: str, repl: str, regex=re.compile(r'\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f469|\U0001f9d1\U0001f3fb\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f468\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f3f4\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f469|\U0001f441\ufe0f\u200d\U0001f5e8\ufe0f|\U0001f471\U0001f3fb\u200d\u2642\ufe0f|\U0001f471\U0001f3fc\u200d\u2642\ufe0f|\U0001f471\U0001f3fd\u200d\u2642\ufe0f|\U0001f471\U0001f3fe\u200d\u2642\ufe0f|\U0001f471\U0001f3ff\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2640\ufe0f|\U0001f471\U0001f3fc\u200d\u2640\ufe0f|\U0001f471\U0001f3fd\u200d\u2640\ufe0f|\U0001f471\U0001f3fe\u200d\u2640\ufe0f|\U0001f471\U0001f3ff\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642\ufe0f|\U0001f64d\U0001f3fc\u200d\u2642\ufe0f|\U0001f64d\U0001f3fd\u200d\u2642\ufe0f|\U0001f64d\U0001f3fe\u200d\u2642\ufe0f|\U0001f64d\U0001f3ff\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640\ufe0f|\U0001f64d\U0001f3fc\u200d\u2640\ufe0f|\U0001f64d\U0001f3fd\u200d\u2640\ufe0f|\U0001f64d\U0001f3fe\u200d\u2640\ufe0f|\U0001f64d\U0001f3ff\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642\ufe0f|\U0001f64e\U0001f3fc\u200d\u2642\ufe0f|\U0001f64e\U0001f3fd\u200d\u2642\ufe0f|\U0001f64e\U0001f3fe\u200d\u2642\ufe0f|\U0001f64e\U0001f3ff\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640\ufe0f|\U0001f64e\U0001f3fc\u200d\u2640\ufe0f|\U0001f64e\U0001f3fd\u200d\u2640\ufe0f|\U0001f64e\U0001f3fe\u200d\u2640\ufe0f|\U0001f64e\U0001f3ff\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2642\ufe0f|\U0001f645\U0001f3fc\u200d\u2642\ufe0f|\U0001f645\U0001f3fd\u200d\u2642\ufe0f|\U0001f645\U0001f3fe\u200d\u2642\ufe0f|\U0001f645\U0001f3ff\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2640\ufe0f|\U0001f645\U0001f3fc\u200d\u2640\ufe0f|\U0001f645\U0001f3fd\u200d\u2640\ufe0f|\U0001f645\U0001f3fe\u200d\u2640\ufe0f|\U0001f645\U0001f3ff\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2642\ufe0f|\U0001f646\U0001f3fc\u200d\u2642\ufe0f|\U0001f646\U0001f3fd\u200d\u2642\ufe0f|\U0001f646\U0001f3fe\u200d\u2642\ufe0f|\U0001f646\U0001f3ff\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2640\ufe0f|\U0001f646\U0001f3fc\u200d\u2640\ufe0f|\U0001f646\U0001f3fd\u200d\u2640\ufe0f|\U0001f646\U0001f3fe\u200d\u2640\ufe0f|\U0001f646\U0001f3ff\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2642\ufe0f|\U0001f481\U0001f3fc\u200d\u2642\ufe0f|\U0001f481\U0001f3fd\u200d\u2642\ufe0f|\U0001f481\U0001f3fe\u200d\u2642\ufe0f|\U0001f481\U0001f3ff\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2640\ufe0f|\U0001f481\U0001f3fc\u200d\u2640\ufe0f|\U0001f481\U0001f3fd\u200d\u2640\ufe0f|\U0001f481\U0001f3fe\u200d\u2640\ufe0f|\U0001f481
\U0001f3ff\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642\ufe0f|\U0001f64b\U0001f3fc\u200d\u2642\ufe0f|\U0001f64b\U0001f3fd\u200d\u2642\ufe0f|\U0001f64b\U0001f3fe\u200d\u2642\ufe0f|\U0001f64b\U0001f3ff\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640\ufe0f|\U0001f64b\U0001f3fc\u200d\u2640\ufe0f|\U0001f64b\U0001f3fd\u200d\u2640\ufe0f|\U0001f64b\U0001f3fe\u200d\u2640\ufe0f|\U0001f64b\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2642\ufe0f|\U0001f647\U0001f3fc\u200d\u2642\ufe0f|\U0001f647\U0001f3fd\u200d\u2642\ufe0f|\U0001f647\U0001f3fe\u200d\u2642\ufe0f|\U0001f647\U0001f3ff\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2640\ufe0f|\U0001f647\U0001f3fc\u200d\u2640\ufe0f|\U0001f647\U0001f3fd\u200d\u2640\ufe0f|\U0001f647\U0001f3fe\u200d\u2640\ufe0f|\U0001f647\U0001f3ff\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2642\ufe0f|\U0001f926\U0001f3fc\u200d\u2642\ufe0f|\U0001f926\U0001f3fd\u200d\u2642\ufe0f|\U0001f926\U0001f3fe\u200d\u2642\ufe0f|\U0001f926\U0001f3ff\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2640\ufe0f|\U0001f926\U0001f3fc\u200d\u2640\ufe0f|\U0001f926\U0001f3fd\u200d\u2640\ufe0f|\U0001f926\U0001f3fe\u200d\u2640\ufe0f|\U0001f926\U0001f3ff\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2642\ufe0f|\U0001f937\U0001f3fc\u200d\u2642\ufe0f|\U0001f937\U0001f3fd\u200d\u2642\ufe0f|\U0001f937\U0001f3fe\u200d\u2642\ufe0f|\U0001f937\U0001f3ff\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2640\ufe0f|\U0001f937\U0001f3fc\u200d\u2640\ufe0f|\U0001f937\U0001f3fd\u200d\u2640\ufe0f|\U0001f937\U0001f3fe\u200d\u2640\ufe0f|\U0001f937\U0001f3ff\u200d\u2640\ufe0f|\U0001f468\U0001f3fb\u200d\u2695\ufe0f|\U0001f468\U0001f3fc\u200d\u2695\ufe0f|\U0001f468\U0001f3fd\u200d\u2695\ufe0f|\U0001f468\U0001f3fe\u200d\u2695\ufe0f|\U0001f468\U0001f3ff\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695\ufe0f|\U0001f469\U0001f3fc\u200d\u2695\ufe0f|\U0001f469\U0001f3fd\u200d\u2695\ufe0f|\U0001f469\U0001f3fe\u200d\u2695\ufe0f|\U0001f469\U0001f3ff\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2696\ufe0f|\U0001f468\U0001f3fc\u200d\u2696\ufe0f|\U0001f468\U0001f3fd\u200d\u2696\ufe0f|\U0001f468\U0001f3fe\u200d\u2696\ufe0f|\U0001f468\U0001f3ff\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696\ufe0f|\U0001f469\U0001f3fc\u200d\u2696\ufe0f|\U0001f469\U0001f3fd\u200d\u2696\ufe0f|\U0001f469\U0001f3fe\u200d\u2696\ufe0f|\U0001f469\U0001f3ff\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2708\ufe0f|\U0001f468\U0001f3fc\u200d\u2708\ufe0f|\U0001f468\U0001f3fd\u200d\u2708\ufe0f|\U0001f468\U0001f3fe\u200d\u2708\ufe0f|\U0001f468\U0001f3ff\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708\ufe0f|\U0001f469\U0001f3fc\u200d\u2708\ufe0f|\U0001f469\U0001f3fd\u200d\u2708\ufe0f|\U0001f469\U0001f3fe\u200d\u2708\ufe0f|\U0001f469\U0001f3ff\u200d\u2708\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642\ufe0f|\U0001f46e\U0001f3fc\u200d\u2642\ufe0f|\U0001f46e\U0001f3fd\u200d\u2642\ufe0f|\U0001f46e\U0001f3fe\u200d\u2642\ufe0f|\U0001f46e\U0001f3ff\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640\ufe0f|\U0001f46e\U0001f3fc\u200d\u2640\ufe0f|\U0001f46e\U0001f3fd\u200d\u2640\ufe0f|\U0001f46e\U0001f3fe\u200d\u2640\ufe0f|\U0001f46e\U0001f3ff\u200d\u2640\ufe0f|\U0001f575\ufe0f
\u200d\u2642\ufe0f|\U0001f575\U0001f3fb\u200d\u2642\ufe0f|\U0001f575\U0001f3fc\u200d\u2642\ufe0f|\U0001f575\U0001f3fd\u200d\u2642\ufe0f|\U0001f575\U0001f3fe\u200d\u2642\ufe0f|\U0001f575\U0001f3ff\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2640\ufe0f|\U0001f575\U0001f3fb\u200d\u2640\ufe0f|\U0001f575\U0001f3fc\u200d\u2640\ufe0f|\U0001f575\U0001f3fd\u200d\u2640\ufe0f|\U0001f575\U0001f3fe\u200d\u2640\ufe0f|\U0001f575\U0001f3ff\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2642\ufe0f|\U0001f482\U0001f3fc\u200d\u2642\ufe0f|\U0001f482\U0001f3fd\u200d\u2642\ufe0f|\U0001f482\U0001f3fe\u200d\u2642\ufe0f|\U0001f482\U0001f3ff\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2640\ufe0f|\U0001f482\U0001f3fc\u200d\u2640\ufe0f|\U0001f482\U0001f3fd\u200d\u2640\ufe0f|\U0001f482\U0001f3fe\u200d\u2640\ufe0f|\U0001f482\U0001f3ff\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2642\ufe0f|\U0001f477\U0001f3fc\u200d\u2642\ufe0f|\U0001f477\U0001f3fd\u200d\u2642\ufe0f|\U0001f477\U0001f3fe\u200d\u2642\ufe0f|\U0001f477\U0001f3ff\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2640\ufe0f|\U0001f477\U0001f3fc\u200d\u2640\ufe0f|\U0001f477\U0001f3fd\u200d\u2640\ufe0f|\U0001f477\U0001f3fe\u200d\u2640\ufe0f|\U0001f477\U0001f3ff\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2642\ufe0f|\U0001f473\U0001f3fc\u200d\u2642\ufe0f|\U0001f473\U0001f3fd\u200d\u2642\ufe0f|\U0001f473\U0001f3fe\u200d\u2642\ufe0f|\U0001f473\U0001f3ff\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2640\ufe0f|\U0001f473\U0001f3fc\u200d\u2640\ufe0f|\U0001f473\U0001f3fd\u200d\u2640\ufe0f|\U0001f473\U0001f3fe\u200d\u2640\ufe0f|\U0001f473\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642\ufe0f|\U0001f9da\U0001f3fc\u200d\u2642\ufe0f|\U0001f9da\U0001f3fd\u200d\u2642\ufe0f|\U0001f9da\U0001f3fe\u200d\u2642\ufe0f|\U0001f9da\U0001f3ff\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640\ufe0f|\U0001f9da\U0001f3fc\u200d\u2640\ufe0f|\U0001f9da\U0001f3fd\u200d\u2640\ufe0f|\U0001f9da\U0001f3fe\u200d\u2640\ufe0f|\U0001f9da\U0001f3ff\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642\ufe0f|\U0001f9db\U0001f3fc\u200d\u2642\ufe0f|\U0001f9db\U0001f3fd\u200d\u2642\ufe0f|\U0001f9db\U0001f3fe\u200d\u2642\ufe0f|\U0001f9db\U0001f3ff\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640\ufe0f|\U0001f9db\U0001f3fc\u200d\u2640\ufe0f|\U0001f9db\U0001f3fd\u200d\u2640\ufe0f|\U0001f9db\U0001f3fe\u200d\u2640\ufe0f|\U0001f9db\U0001f3ff\u200d\u26
40\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2642\ufe0f|\U0001f486\U0001f3fc\u200d\u2642\ufe0f|\U0001f486\U0001f3fd\u200d\u2642\ufe0f|\U0001f486\U0001f3fe\u200d\u2642\ufe0f|\U0001f486\U0001f3ff\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2640\ufe0f|\U0001f486\U0001f3fc\u200d\u2640\ufe0f|\U0001f486\U0001f3fd\u200d\u2640\ufe0f|\U0001f486\U0001f3fe\u200d\u2640\ufe0f|\U0001f486\U0001f3ff\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2642\ufe0f|\U0001f487\U0001f3fc\u200d\u2642\ufe0f|\U0001f487\U0001f3fd\u200d\u2642\ufe0f|\U0001f487\U0001f3fe\u200d\u2642\ufe0f|\U0001f487\U0001f3ff\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2640\ufe0f|\U0001f487\U0001f3fc\u200d\u2640\ufe0f|\U0001f487\U0001f3fd\u200d\u2640\ufe0f|\U0001f487\U0001f3fe\u200d\u2640\ufe0f|\U0001f487\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2642\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2640\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642\ufe
0f|\U0001f9d7\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2642\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2640\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2642\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2640\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2642\ufe0f|\u26f9\U0001f3fb\u200d\u2642\ufe0f|\u26f9\U0001f3fc\u200d\u2642\ufe0f|\u26f9\U0001f3fd\u200d\u2642\ufe0f|\u26f9\U0001f3fe\u200d\u2642\ufe0f|\u26f9\U0001f3ff\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2640\ufe0f|\u26f9\U0001f3fb\u200d\u2640\ufe0f|\u26f9\U0001f3fc\u200d\u2640\ufe0f|\u26f9\U0001f3fd\u200d\u2640\ufe0f|\u26f9\U0001f3fe\u200d\u2640\ufe0f|\u26f9\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2640\ufe
0f|\U0001f6b5\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2642\ufe0f|\U0001f938\U0001f3fc\u200d\u2642\ufe0f|\U0001f938\U0001f3fd\u200d\u2642\ufe0f|\U0001f938\U0001f3fe\u200d\u2642\ufe0f|\U0001f938\U0001f3ff\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2640\ufe0f|\U0001f938\U0001f3fc\u200d\u2640\ufe0f|\U0001f938\U0001f3fd\u200d\u2640\ufe0f|\U0001f938\U0001f3fe\u200d\u2640\ufe0f|\U0001f938\U0001f3ff\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642\ufe0f|\U0001f93d\U0001f3fc\u200d\u2642\ufe0f|\U0001f93d\U0001f3fd\u200d\u2642\ufe0f|\U0001f93d\U0001f3fe\u200d\u2642\ufe0f|\U0001f93d\U0001f3ff\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640\ufe0f|\U0001f93d\U0001f3fc\u200d\u2640\ufe0f|\U0001f93d\U0001f3fd\u200d\u2640\ufe0f|\U0001f93d\U0001f3fe\u200d\u2640\ufe0f|\U0001f93d\U0001f3ff\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642\ufe0f|\U0001f93e\U0001f3fc\u200d\u2642\ufe0f|\U0001f93e\U0001f3fd\u200d\u2642\ufe0f|\U0001f93e\U0001f3fe\u200d\u2642\ufe0f|\U0001f93e\U0001f3ff\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640\ufe0f|\U0001f93e\U0001f3fc\u200d\u2640\ufe0f|\U0001f93e\U0001f3fd\u200d\u2640\ufe0f|\U0001f93e\U0001f3fe\u200d\u2640\ufe0f|\U0001f93e\U0001f3ff\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2642\ufe0f|\U0001f939\U0001f3fc\u200d\u2642\ufe0f|\U0001f939\U0001f3fd\u200d\u2642\ufe0f|\U0001f939\U0001f3fe\u200d\u2642\ufe0f|\U0001f939\U0001f3ff\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2640\ufe0f|\U0001f939\U0001f3fc\u200d\u2640\ufe0f|\U0001f939\U0001f3fd\u200d\u2640\ufe0f|\U0001f939\U0001f3fe\u200d\u2640\ufe0f|\U0001f939\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d1\u200d\U0001f91d\u200d\U0001f9d1|\U0001f469\u200d\u2764\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f441\u200d\U0001f5e8\ufe0f|\U0001f441\ufe0f\u200d\U0001f5e8|\U0001f471\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2642|\U0001f471\U0001f3fc\u200d\u2642|\U0001f471\U0001f3fd\u200d\u2642|\U0001f471\U0001f3fe\u200d\u2642|\U0001f471\U0001f3ff\u200d\u2642|\U0001f468\U0001f3fb\u200d\U0001f9b0|\U0001f468\U0001f3fc\u200d\U0001f9b0|\U0001f468\U0001f3fd\u200d\U0001f9b0|\U0001f468\U0001f3fe\u200d\U0001f9b0|\U0001f468\U0001f3ff\u200d\U0001f9b0|\U0001f468\U0001f3fb\u200d\U0001f9b1|\U0001f468\U0001f3fc\u200d\U0001f9b1|\U0001f468\U0001f3fd\u200d\U0001f9b1|\U0001f468\U0001f3fe\u200d\U0001f9b1|\U0001f468\U0001f3ff\u200d\U0001f9b1|\U0001f468\U0001f3fb\u200d\U0001f9b3|\U0001f468\U0001f3fc\u200d\U0001f9b3|\U0001f468\U0001f3fd\u200d\U0001f9b3|\U0001f468\U0001f3fe\u200d\U0001f9b3|\U0001f468\U0001f3ff\u200d\U0001f9b3|\U0001f468\U0001f3fb\u200d\U0001f
9b2|\U0001f468\U0001f3fc\u200d\U0001f9b2|\U0001f468\U0001f3fd\u200d\U0001f9b2|\U0001f468\U0001f3fe\u200d\U0001f9b2|\U0001f468\U0001f3ff\u200d\U0001f9b2|\U0001f471\u200d\u2640\ufe0f|\U0001f471\U0001f3fb\u200d\u2640|\U0001f471\U0001f3fc\u200d\u2640|\U0001f471\U0001f3fd\u200d\u2640|\U0001f471\U0001f3fe\u200d\u2640|\U0001f471\U0001f3ff\u200d\u2640|\U0001f469\U0001f3fb\u200d\U0001f9b0|\U0001f469\U0001f3fc\u200d\U0001f9b0|\U0001f469\U0001f3fd\u200d\U0001f9b0|\U0001f469\U0001f3fe\u200d\U0001f9b0|\U0001f469\U0001f3ff\u200d\U0001f9b0|\U0001f469\U0001f3fb\u200d\U0001f9b1|\U0001f469\U0001f3fc\u200d\U0001f9b1|\U0001f469\U0001f3fd\u200d\U0001f9b1|\U0001f469\U0001f3fe\u200d\U0001f9b1|\U0001f469\U0001f3ff\u200d\U0001f9b1|\U0001f469\U0001f3fb\u200d\U0001f9b3|\U0001f469\U0001f3fc\u200d\U0001f9b3|\U0001f469\U0001f3fd\u200d\U0001f9b3|\U0001f469\U0001f3fe\u200d\U0001f9b3|\U0001f469\U0001f3ff\u200d\U0001f9b3|\U0001f469\U0001f3fb\u200d\U0001f9b2|\U0001f469\U0001f3fc\u200d\U0001f9b2|\U0001f469\U0001f3fd\u200d\U0001f9b2|\U0001f469\U0001f3fe\u200d\U0001f9b2|\U0001f469\U0001f3ff\u200d\U0001f9b2|\U0001f64d\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642|\U0001f64d\U0001f3fc\u200d\u2642|\U0001f64d\U0001f3fd\u200d\u2642|\U0001f64d\U0001f3fe\u200d\u2642|\U0001f64d\U0001f3ff\u200d\u2642|\U0001f64d\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640|\U0001f64d\U0001f3fc\u200d\u2640|\U0001f64d\U0001f3fd\u200d\u2640|\U0001f64d\U0001f3fe\u200d\u2640|\U0001f64d\U0001f3ff\u200d\u2640|\U0001f64e\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642|\U0001f64e\U0001f3fc\u200d\u2642|\U0001f64e\U0001f3fd\u200d\u2642|\U0001f64e\U0001f3fe\u200d\u2642|\U0001f64e\U0001f3ff\u200d\u2642|\U0001f64e\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640|\U0001f64e\U0001f3fc\u200d\u2640|\U0001f64e\U0001f3fd\u200d\u2640|\U0001f64e\U0001f3fe\u200d\u2640|\U0001f64e\U0001f3ff\u200d\u2640|\U0001f645\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2642|\U0001f645\U0001f3fc\u200d\u2642|\U0001f645\U0001f3fd\u200d\u2642|\U0001f645\U0001f3fe\u200d\u2642|\U0001f645\U0001f3ff\u200d\u2642|\U0001f645\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2640|\U0001f645\U0001f3fc\u200d\u2640|\U0001f645\U0001f3fd\u200d\u2640|\U0001f645\U0001f3fe\u200d\u2640|\U0001f645\U0001f3ff\u200d\u2640|\U0001f646\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2642|\U0001f646\U0001f3fc\u200d\u2642|\U0001f646\U0001f3fd\u200d\u2642|\U0001f646\U0001f3fe\u200d\u2642|\U0001f646\U0001f3ff\u200d\u2642|\U0001f646\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2640|\U0001f646\U0001f3fc\u200d\u2640|\U0001f646\U0001f3fd\u200d\u2640|\U0001f646\U0001f3fe\u200d\u2640|\U0001f646\U0001f3ff\u200d\u2640|\U0001f481\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2642|\U0001f481\U0001f3fc\u200d\u2642|\U0001f481\U0001f3fd\u200d\u2642|\U0001f481\U0001f3fe\u200d\u2642|\U0001f481\U0001f3ff\u200d\u2642|\U0001f481\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2640|\U0001f481\U0001f3fc\u200d\u2640|\U0001f481\U0001f3fd\u200d\u2640|\U0001f481\U0001f3fe\u200d\u2640|\U0001f481\U0001f3ff\u200d\u2640|\U0001f64b\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642|\U0001f64b\U0001f3fc\u200d\u2642|\U0001f64b\U0001f3fd\u200d\u2642|\U0001f64b\U0001f3fe\u200d\u2642|\U0001f64b\U0001f3ff\u200d\u2642|\U0001f64b\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640|\U0001f64b\U0001f3fc\u200d\u2640|\U0001f64b\U0001f3fd\u200d\u2640|\U0001f64b\U0001f3fe\u200d\u2640|\U0001f64b\U0001f3ff\u200d\u2640|\U0001f9cf\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642|\U0001f9cf\U0001f3fc\u200d\u2642|\U0001f9cf\U0001f3fd\u200d\u2642|\U0001f9cf\U0
001f3fe\u200d\u2642|\U0001f9cf\U0001f3ff\u200d\u2642|\U0001f9cf\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640|\U0001f9cf\U0001f3fc\u200d\u2640|\U0001f9cf\U0001f3fd\u200d\u2640|\U0001f9cf\U0001f3fe\u200d\u2640|\U0001f9cf\U0001f3ff\u200d\u2640|\U0001f647\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2642|\U0001f647\U0001f3fc\u200d\u2642|\U0001f647\U0001f3fd\u200d\u2642|\U0001f647\U0001f3fe\u200d\u2642|\U0001f647\U0001f3ff\u200d\u2642|\U0001f647\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2640|\U0001f647\U0001f3fc\u200d\u2640|\U0001f647\U0001f3fd\u200d\u2640|\U0001f647\U0001f3fe\u200d\u2640|\U0001f647\U0001f3ff\u200d\u2640|\U0001f926\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2642|\U0001f926\U0001f3fc\u200d\u2642|\U0001f926\U0001f3fd\u200d\u2642|\U0001f926\U0001f3fe\u200d\u2642|\U0001f926\U0001f3ff\u200d\u2642|\U0001f926\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2640|\U0001f926\U0001f3fc\u200d\u2640|\U0001f926\U0001f3fd\u200d\u2640|\U0001f926\U0001f3fe\u200d\u2640|\U0001f926\U0001f3ff\u200d\u2640|\U0001f937\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2642|\U0001f937\U0001f3fc\u200d\u2642|\U0001f937\U0001f3fd\u200d\u2642|\U0001f937\U0001f3fe\u200d\u2642|\U0001f937\U0001f3ff\u200d\u2642|\U0001f937\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2640|\U0001f937\U0001f3fc\u200d\u2640|\U0001f937\U0001f3fd\u200d\u2640|\U0001f937\U0001f3fe\u200d\u2640|\U0001f937\U0001f3ff\u200d\u2640|\U0001f468\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2695|\U0001f468\U0001f3fc\u200d\u2695|\U0001f468\U0001f3fd\u200d\u2695|\U0001f468\U0001f3fe\u200d\u2695|\U0001f468\U0001f3ff\u200d\u2695|\U0001f469\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695|\U0001f469\U0001f3fc\u200d\u2695|\U0001f469\U0001f3fd\u200d\u2695|\U0001f469\U0001f3fe\u200d\u2695|\U0001f469\U0001f3ff\u200d\u2695|\U0001f468\U0001f3fb\u200d\U0001f393|\U0001f468\U0001f3fc\u200d\U0001f393|\U0001f468\U0001f3fd\u200d\U0001f393|\U0001f468\U0001f3fe\u200d\U0001f393|\U0001f468\U0001f3ff\u200d\U0001f393|\U0001f469\U0001f3fb\u200d\U0001f393|\U0001f469\U0001f3fc\u200d\U0001f393|\U0001f469\U0001f3fd\u200d\U0001f393|\U0001f469\U0001f3fe\u200d\U0001f393|\U0001f469\U0001f3ff\u200d\U0001f393|\U0001f468\U0001f3fb\u200d\U0001f3eb|\U0001f468\U0001f3fc\u200d\U0001f3eb|\U0001f468\U0001f3fd\u200d\U0001f3eb|\U0001f468\U0001f3fe\u200d\U0001f3eb|\U0001f468\U0001f3ff\u200d\U0001f3eb|\U0001f469\U0001f3fb\u200d\U0001f3eb|\U0001f469\U0001f3fc\u200d\U0001f3eb|\U0001f469\U0001f3fd\u200d\U0001f3eb|\U0001f469\U0001f3fe\u200d\U0001f3eb|\U0001f469\U0001f3ff\u200d\U0001f3eb|\U0001f468\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2696|\U0001f468\U0001f3fc\u200d\u2696|\U0001f468\U0001f3fd\u200d\u2696|\U0001f468\U0001f3fe\u200d\u2696|\U0001f468\U0001f3ff\u200d\u2696|\U0001f469\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696|\U0001f469\U0001f3fc\u200d\u2696|\U0001f469\U0001f3fd\u200d\u2696|\U0001f469\U0001f3fe\u200d\u2696|\U0001f469\U0001f3ff\u200d\u2696|\U0001f468\U0001f3fb\u200d\U0001f33e|\U0001f468\U0001f3fc\u200d\U0001f33e|\U0001f468\U0001f3fd\u200d\U0001f33e|\U0001f468\U0001f3fe\u200d\U0001f33e|\U0001f468\U0001f3ff\u200d\U0001f33e|\U0001f469\U0001f3fb\u200d\U0001f33e|\U0001f469\U0001f3fc\u200d\U0001f33e|\U0001f469\U0001f3fd\u200d\U0001f33e|\U0001f469\U0001f3fe\u200d\U0001f33e|\U0001f469\U0001f3ff\u200d\U0001f33e|\U0001f468\U0001f3fb\u200d\U0001f373|\U0001f468\U0001f3fc\u200d\U0001f373|\U0001f468\U0001f3fd\u200d\U0001f373|\U0001f468\U0001f3fe\u200d\U0001f373|\U0001f468\U0001f3ff\u200d\U0001f373|\U0001f469\U0001f3fb\u200d\U0001f373|\U0001f469\U0001f3fc\u200d\U0001f373
|\U0001f469\U0001f3fd\u200d\U0001f373|\U0001f469\U0001f3fe\u200d\U0001f373|\U0001f469\U0001f3ff\u200d\U0001f373|\U0001f468\U0001f3fb\u200d\U0001f527|\U0001f468\U0001f3fc\u200d\U0001f527|\U0001f468\U0001f3fd\u200d\U0001f527|\U0001f468\U0001f3fe\u200d\U0001f527|\U0001f468\U0001f3ff\u200d\U0001f527|\U0001f469\U0001f3fb\u200d\U0001f527|\U0001f469\U0001f3fc\u200d\U0001f527|\U0001f469\U0001f3fd\u200d\U0001f527|\U0001f469\U0001f3fe\u200d\U0001f527|\U0001f469\U0001f3ff\u200d\U0001f527|\U0001f468\U0001f3fb\u200d\U0001f3ed|\U0001f468\U0001f3fc\u200d\U0001f3ed|\U0001f468\U0001f3fd\u200d\U0001f3ed|\U0001f468\U0001f3fe\u200d\U0001f3ed|\U0001f468\U0001f3ff\u200d\U0001f3ed|\U0001f469\U0001f3fb\u200d\U0001f3ed|\U0001f469\U0001f3fc\u200d\U0001f3ed|\U0001f469\U0001f3fd\u200d\U0001f3ed|\U0001f469\U0001f3fe\u200d\U0001f3ed|\U0001f469\U0001f3ff\u200d\U0001f3ed|\U0001f468\U0001f3fb\u200d\U0001f4bc|\U0001f468\U0001f3fc\u200d\U0001f4bc|\U0001f468\U0001f3fd\u200d\U0001f4bc|\U0001f468\U0001f3fe\u200d\U0001f4bc|\U0001f468\U0001f3ff\u200d\U0001f4bc|\U0001f469\U0001f3fb\u200d\U0001f4bc|\U0001f469\U0001f3fc\u200d\U0001f4bc|\U0001f469\U0001f3fd\u200d\U0001f4bc|\U0001f469\U0001f3fe\u200d\U0001f4bc|\U0001f469\U0001f3ff\u200d\U0001f4bc|\U0001f468\U0001f3fb\u200d\U0001f52c|\U0001f468\U0001f3fc\u200d\U0001f52c|\U0001f468\U0001f3fd\u200d\U0001f52c|\U0001f468\U0001f3fe\u200d\U0001f52c|\U0001f468\U0001f3ff\u200d\U0001f52c|\U0001f469\U0001f3fb\u200d\U0001f52c|\U0001f469\U0001f3fc\u200d\U0001f52c|\U0001f469\U0001f3fd\u200d\U0001f52c|\U0001f469\U0001f3fe\u200d\U0001f52c|\U0001f469\U0001f3ff\u200d\U0001f52c|\U0001f468\U0001f3fb\u200d\U0001f4bb|\U0001f468\U0001f3fc\u200d\U0001f4bb|\U0001f468\U0001f3fd\u200d\U0001f4bb|\U0001f468\U0001f3fe\u200d\U0001f4bb|\U0001f468\U0001f3ff\u200d\U0001f4bb|\U0001f469\U0001f3fb\u200d\U0001f4bb|\U0001f469\U0001f3fc\u200d\U0001f4bb|\U0001f469\U0001f3fd\u200d\U0001f4bb|\U0001f469\U0001f3fe\u200d\U0001f4bb|\U0001f469\U0001f3ff\u200d\U0001f4bb|\U0001f468\U0001f3fb\u200d\U0001f3a4|\U0001f468\U0001f3fc\u200d\U0001f3a4|\U0001f468\U0001f3fd\u200d\U0001f3a4|\U0001f468\U0001f3fe\u200d\U0001f3a4|\U0001f468\U0001f3ff\u200d\U0001f3a4|\U0001f469\U0001f3fb\u200d\U0001f3a4|\U0001f469\U0001f3fc\u200d\U0001f3a4|\U0001f469\U0001f3fd\u200d\U0001f3a4|\U0001f469\U0001f3fe\u200d\U0001f3a4|\U0001f469\U0001f3ff\u200d\U0001f3a4|\U0001f468\U0001f3fb\u200d\U0001f3a8|\U0001f468\U0001f3fc\u200d\U0001f3a8|\U0001f468\U0001f3fd\u200d\U0001f3a8|\U0001f468\U0001f3fe\u200d\U0001f3a8|\U0001f468\U0001f3ff\u200d\U0001f3a8|\U0001f469\U0001f3fb\u200d\U0001f3a8|\U0001f469\U0001f3fc\u200d\U0001f3a8|\U0001f469\U0001f3fd\u200d\U0001f3a8|\U0001f469\U0001f3fe\u200d\U0001f3a8|\U0001f469\U0001f3ff\u200d\U0001f3a8|\U0001f468\u200d\u2708\ufe0f|\U0001f468\U0001f3fb\u200d\u2708|\U0001f468\U0001f3fc\u200d\u2708|\U0001f468\U0001f3fd\u200d\u2708|\U0001f468\U0001f3fe\u200d\u2708|\U0001f468\U0001f3ff\u200d\u2708|\U0001f469\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708|\U0001f469\U0001f3fc\u200d\u2708|\U0001f469\U0001f3fd\u200d\u2708|\U0001f469\U0001f3fe\u200d\u2708|\U0001f469\U0001f3ff\u200d\u2708|\U0001f468\U0001f3fb\u200d\U0001f680|\U0001f468\U0001f3fc\u200d\U0001f680|\U0001f468\U0001f3fd\u200d\U0001f680|\U0001f468\U0001f3fe\u200d\U0001f680|\U0001f468\U0001f3ff\u200d\U0001f680|\U0001f469\U0001f3fb\u200d\U0001f680|\U0001f469\U0001f3fc\u200d\U0001f680|\U0001f469\U0001f3fd\u200d\U0001f680|\U0001f469\U0001f3fe\u200d\U0001f680|\U0001f469\U0001f3ff\u200d\U0001f680|\U0001f468\U0001f3fb\u200d\U0001f692|\U0001f468\U0001f3fc\u200d\U0001f692|\U0001f468\U0001f3fd\
u200d\U0001f692|\U0001f468\U0001f3fe\u200d\U0001f692|\U0001f468\U0001f3ff\u200d\U0001f692|\U0001f469\U0001f3fb\u200d\U0001f692|\U0001f469\U0001f3fc\u200d\U0001f692|\U0001f469\U0001f3fd\u200d\U0001f692|\U0001f469\U0001f3fe\u200d\U0001f692|\U0001f469\U0001f3ff\u200d\U0001f692|\U0001f46e\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642|\U0001f46e\U0001f3fc\u200d\u2642|\U0001f46e\U0001f3fd\u200d\u2642|\U0001f46e\U0001f3fe\u200d\u2642|\U0001f46e\U0001f3ff\u200d\u2642|\U0001f46e\u200d\u2640\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640|\U0001f46e\U0001f3fc\u200d\u2640|\U0001f46e\U0001f3fd\u200d\u2640|\U0001f46e\U0001f3fe\u200d\u2640|\U0001f46e\U0001f3ff\u200d\u2640|\U0001f575\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2642|\U0001f575\U0001f3fb\u200d\u2642|\U0001f575\U0001f3fc\u200d\u2642|\U0001f575\U0001f3fd\u200d\u2642|\U0001f575\U0001f3fe\u200d\u2642|\U0001f575\U0001f3ff\u200d\u2642|\U0001f575\u200d\u2640\ufe0f|\U0001f575\ufe0f\u200d\u2640|\U0001f575\U0001f3fb\u200d\u2640|\U0001f575\U0001f3fc\u200d\u2640|\U0001f575\U0001f3fd\u200d\u2640|\U0001f575\U0001f3fe\u200d\u2640|\U0001f575\U0001f3ff\u200d\u2640|\U0001f482\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2642|\U0001f482\U0001f3fc\u200d\u2642|\U0001f482\U0001f3fd\u200d\u2642|\U0001f482\U0001f3fe\u200d\u2642|\U0001f482\U0001f3ff\u200d\u2642|\U0001f482\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2640|\U0001f482\U0001f3fc\u200d\u2640|\U0001f482\U0001f3fd\u200d\u2640|\U0001f482\U0001f3fe\u200d\u2640|\U0001f482\U0001f3ff\u200d\u2640|\U0001f477\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2642|\U0001f477\U0001f3fc\u200d\u2642|\U0001f477\U0001f3fd\u200d\u2642|\U0001f477\U0001f3fe\u200d\u2642|\U0001f477\U0001f3ff\u200d\u2642|\U0001f477\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2640|\U0001f477\U0001f3fc\u200d\u2640|\U0001f477\U0001f3fd\u200d\u2640|\U0001f477\U0001f3fe\u200d\u2640|\U0001f477\U0001f3ff\u200d\u2640|\U0001f473\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2642|\U0001f473\U0001f3fc\u200d\u2642|\U0001f473\U0001f3fd\u200d\u2642|\U0001f473\U0001f3fe\u200d\u2642|\U0001f473\U0001f3ff\u200d\u2642|\U0001f473\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2640|\U0001f473\U0001f3fc\u200d\u2640|\U0001f473\U0001f3fd\u200d\u2640|\U0001f473\U0001f3fe\u200d\u2640|\U0001f473\U0001f3ff\u200d\u2640|\U0001f9b8\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642|\U0001f9b8\U0001f3fc\u200d\u2642|\U0001f9b8\U0001f3fd\u200d\u2642|\U0001f9b8\U0001f3fe\u200d\u2642|\U0001f9b8\U0001f3ff\u200d\u2642|\U0001f9b8\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640|\U0001f9b8\U0001f3fc\u200d\u2640|\U0001f9b8\U0001f3fd\u200d\u2640|\U0001f9b8\U0001f3fe\u200d\u2640|\U0001f9b8\U0001f3ff\u200d\u2640|\U0001f9b9\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642|\U0001f9b9\U0001f3fc\u200d\u2642|\U0001f9b9\U0001f3fd\u200d\u2642|\U0001f9b9\U0001f3fe\u200d\u2642|\U0001f9b9\U0001f3ff\u200d\u2642|\U0001f9b9\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640|\U0001f9b9\U0001f3fc\u200d\u2640|\U0001f9b9\U0001f3fd\u200d\u2640|\U0001f9b9\U0001f3fe\u200d\u2640|\U0001f9b9\U0001f3ff\u200d\u2640|\U0001f9d9\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642|\U0001f9d9\U0001f3fc\u200d\u2642|\U0001f9d9\U0001f3fd\u200d\u2642|\U0001f9d9\U0001f3fe\u200d\u2642|\U0001f9d9\U0001f3ff\u200d\u2642|\U0001f9d9\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640|\U0001f9d9\U0001f3fc\u200d\u2640|\U0001f9d9\U0001f3fd\u200d\u2640|\U0001f9d9\U0001f3fe\u200d\u2640|\U0001f9d9\U0001f3ff\u200d\u2640|\U0001f9da\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642|\U0001f9da\U0001f3fc\u200d\u2642|\U0001f9da\U0001f3fd\u2
00d\u2642|\U0001f9da\U0001f3fe\u200d\u2642|\U0001f9da\U0001f3ff\u200d\u2642|\U0001f9da\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640|\U0001f9da\U0001f3fc\u200d\u2640|\U0001f9da\U0001f3fd\u200d\u2640|\U0001f9da\U0001f3fe\u200d\u2640|\U0001f9da\U0001f3ff\u200d\u2640|\U0001f9db\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642|\U0001f9db\U0001f3fc\u200d\u2642|\U0001f9db\U0001f3fd\u200d\u2642|\U0001f9db\U0001f3fe\u200d\u2642|\U0001f9db\U0001f3ff\u200d\u2642|\U0001f9db\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640|\U0001f9db\U0001f3fc\u200d\u2640|\U0001f9db\U0001f3fd\u200d\u2640|\U0001f9db\U0001f3fe\u200d\u2640|\U0001f9db\U0001f3ff\u200d\u2640|\U0001f9dc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642|\U0001f9dc\U0001f3fc\u200d\u2642|\U0001f9dc\U0001f3fd\u200d\u2642|\U0001f9dc\U0001f3fe\u200d\u2642|\U0001f9dc\U0001f3ff\u200d\u2642|\U0001f9dc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640|\U0001f9dc\U0001f3fc\u200d\u2640|\U0001f9dc\U0001f3fd\u200d\u2640|\U0001f9dc\U0001f3fe\u200d\u2640|\U0001f9dc\U0001f3ff\u200d\u2640|\U0001f9dd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642|\U0001f9dd\U0001f3fc\u200d\u2642|\U0001f9dd\U0001f3fd\u200d\u2642|\U0001f9dd\U0001f3fe\u200d\u2642|\U0001f9dd\U0001f3ff\u200d\u2642|\U0001f9dd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640|\U0001f9dd\U0001f3fc\u200d\u2640|\U0001f9dd\U0001f3fd\u200d\u2640|\U0001f9dd\U0001f3fe\u200d\u2640|\U0001f9dd\U0001f3ff\u200d\u2640|\U0001f9de\u200d\u2642\ufe0f|\U0001f9de\u200d\u2640\ufe0f|\U0001f9df\u200d\u2642\ufe0f|\U0001f9df\u200d\u2640\ufe0f|\U0001f486\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2642|\U0001f486\U0001f3fc\u200d\u2642|\U0001f486\U0001f3fd\u200d\u2642|\U0001f486\U0001f3fe\u200d\u2642|\U0001f486\U0001f3ff\u200d\u2642|\U0001f486\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2640|\U0001f486\U0001f3fc\u200d\u2640|\U0001f486\U0001f3fd\u200d\u2640|\U0001f486\U0001f3fe\u200d\u2640|\U0001f486\U0001f3ff\u200d\u2640|\U0001f487\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2642|\U0001f487\U0001f3fc\u200d\u2642|\U0001f487\U0001f3fd\u200d\u2642|\U0001f487\U0001f3fe\u200d\u2642|\U0001f487\U0001f3ff\u200d\u2642|\U0001f487\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2640|\U0001f487\U0001f3fc\u200d\u2640|\U0001f487\U0001f3fd\u200d\u2640|\U0001f487\U0001f3fe\u200d\u2640|\U0001f487\U0001f3ff\u200d\u2640|\U0001f6b6\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642|\U0001f6b6\U0001f3fc\u200d\u2642|\U0001f6b6\U0001f3fd\u200d\u2642|\U0001f6b6\U0001f3fe\u200d\u2642|\U0001f6b6\U0001f3ff\u200d\u2642|\U0001f6b6\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640|\U0001f6b6\U0001f3fc\u200d\u2640|\U0001f6b6\U0001f3fd\u200d\u2640|\U0001f6b6\U0001f3fe\u200d\u2640|\U0001f6b6\U0001f3ff\u200d\u2640|\U0001f9cd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642|\U0001f9cd\U0001f3fc\u200d\u2642|\U0001f9cd\U0001f3fd\u200d\u2642|\U0001f9cd\U0001f3fe\u200d\u2642|\U0001f9cd\U0001f3ff\u200d\u2642|\U0001f9cd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640|\U0001f9cd\U0001f3fc\u200d\u2640|\U0001f9cd\U0001f3fd\u200d\u2640|\U0001f9cd\U0001f3fe\u200d\u2640|\U0001f9cd\U0001f3ff\u200d\u2640|\U0001f9ce\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642|\U0001f9ce\U0001f3fc\u200d\u2642|\U0001f9ce\U0001f3fd\u200d\u2642|\U0001f9ce\U0001f3fe\u200d\u2642|\U0001f9ce\U0001f3ff\u200d\u2642|\U0001f9ce\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640|\U0001f9ce\U0001f3fc\u200d\u2640|\U0001f9ce\U0001f3fd\u200d\u2640|\U0001f9ce\U0001f3fe\u200d\u2640|\U0001f9ce\U0001f3ff\u200d\u2640|\U0001f468\U0001f3fb\u200d\U0001f9af|\U0001f468\U0001f3fc\u200d\U
0001f9af|\U0001f468\U0001f3fd\u200d\U0001f9af|\U0001f468\U0001f3fe\u200d\U0001f9af|\U0001f468\U0001f3ff\u200d\U0001f9af|\U0001f469\U0001f3fb\u200d\U0001f9af|\U0001f469\U0001f3fc\u200d\U0001f9af|\U0001f469\U0001f3fd\u200d\U0001f9af|\U0001f469\U0001f3fe\u200d\U0001f9af|\U0001f469\U0001f3ff\u200d\U0001f9af|\U0001f468\U0001f3fb\u200d\U0001f9bc|\U0001f468\U0001f3fc\u200d\U0001f9bc|\U0001f468\U0001f3fd\u200d\U0001f9bc|\U0001f468\U0001f3fe\u200d\U0001f9bc|\U0001f468\U0001f3ff\u200d\U0001f9bc|\U0001f469\U0001f3fb\u200d\U0001f9bc|\U0001f469\U0001f3fc\u200d\U0001f9bc|\U0001f469\U0001f3fd\u200d\U0001f9bc|\U0001f469\U0001f3fe\u200d\U0001f9bc|\U0001f469\U0001f3ff\u200d\U0001f9bc|\U0001f468\U0001f3fb\u200d\U0001f9bd|\U0001f468\U0001f3fc\u200d\U0001f9bd|\U0001f468\U0001f3fd\u200d\U0001f9bd|\U0001f468\U0001f3fe\u200d\U0001f9bd|\U0001f468\U0001f3ff\u200d\U0001f9bd|\U0001f469\U0001f3fb\u200d\U0001f9bd|\U0001f469\U0001f3fc\u200d\U0001f9bd|\U0001f469\U0001f3fd\u200d\U0001f9bd|\U0001f469\U0001f3fe\u200d\U0001f9bd|\U0001f469\U0001f3ff\u200d\U0001f9bd|\U0001f3c3\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642|\U0001f3c3\U0001f3fc\u200d\u2642|\U0001f3c3\U0001f3fd\u200d\u2642|\U0001f3c3\U0001f3fe\u200d\u2642|\U0001f3c3\U0001f3ff\u200d\u2642|\U0001f3c3\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640|\U0001f3c3\U0001f3fc\u200d\u2640|\U0001f3c3\U0001f3fd\u200d\u2640|\U0001f3c3\U0001f3fe\u200d\u2640|\U0001f3c3\U0001f3ff\u200d\u2640|\U0001f46f\u200d\u2642\ufe0f|\U0001f46f\u200d\u2640\ufe0f|\U0001f9d6\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642|\U0001f9d6\U0001f3fc\u200d\u2642|\U0001f9d6\U0001f3fd\u200d\u2642|\U0001f9d6\U0001f3fe\u200d\u2642|\U0001f9d6\U0001f3ff\u200d\u2642|\U0001f9d6\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640|\U0001f9d6\U0001f3fc\u200d\u2640|\U0001f9d6\U0001f3fd\u200d\u2640|\U0001f9d6\U0001f3fe\u200d\u2640|\U0001f9d6\U0001f3ff\u200d\u2640|\U0001f9d7\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642|\U0001f9d7\U0001f3fc\u200d\u2642|\U0001f9d7\U0001f3fd\u200d\u2642|\U0001f9d7\U0001f3fe\u200d\u2642|\U0001f9d7\U0001f3ff\u200d\u2642|\U0001f9d7\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640|\U0001f9d7\U0001f3fc\u200d\u2640|\U0001f9d7\U0001f3fd\u200d\u2640|\U0001f9d7\U0001f3fe\u200d\u2640|\U0001f9d7\U0001f3ff\u200d\u2640|\U0001f3cc\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2642|\U0001f3cc\U0001f3fb\u200d\u2642|\U0001f3cc\U0001f3fc\u200d\u2642|\U0001f3cc\U0001f3fd\u200d\u2642|\U0001f3cc\U0001f3fe\u200d\u2642|\U0001f3cc\U0001f3ff\u200d\u2642|\U0001f3cc\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2640|\U0001f3cc\U0001f3fb\u200d\u2640|\U0001f3cc\U0001f3fc\u200d\u2640|\U0001f3cc\U0001f3fd\u200d\u2640|\U0001f3cc\U0001f3fe\u200d\u2640|\U0001f3cc\U0001f3ff\u200d\u2640|\U0001f3c4\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642|\U0001f3c4\U0001f3fc\u200d\u2642|\U0001f3c4\U0001f3fd\u200d\u2642|\U0001f3c4\U0001f3fe\u200d\u2642|\U0001f3c4\U0001f3ff\u200d\u2642|\U0001f3c4\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640|\U0001f3c4\U0001f3fc\u200d\u2640|\U0001f3c4\U0001f3fd\u200d\u2640|\U0001f3c4\U0001f3fe\u200d\u2640|\U0001f3c4\U0001f3ff\u200d\u2640|\U0001f6a3\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642|\U0001f6a3\U0001f3fc\u200d\u2642|\U0001f6a3\U0001f3fd\u200d\u2642|\U0001f6a3\U0001f3fe\u200d\u2642|\U0001f6a3\U0001f3ff\u200d\u2642|\U0001f6a3\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640|\U0001f6a3\U0001f3fc\u200d\u2640|\U0001f6a3\U0001f3fd\u200d\u2640|\U0001f6a3\U0001f3fe\u200d\u2640|\U0001f6a3\U0001f3ff\u200d\u2640|\U0001f3ca\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642|\U00
01f3ca\U0001f3fc\u200d\u2642|\U0001f3ca\U0001f3fd\u200d\u2642|\U0001f3ca\U0001f3fe\u200d\u2642|\U0001f3ca\U0001f3ff\u200d\u2642|\U0001f3ca\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640|\U0001f3ca\U0001f3fc\u200d\u2640|\U0001f3ca\U0001f3fd\u200d\u2640|\U0001f3ca\U0001f3fe\u200d\u2640|\U0001f3ca\U0001f3ff\u200d\u2640|\u26f9\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2642|\u26f9\U0001f3fb\u200d\u2642|\u26f9\U0001f3fc\u200d\u2642|\u26f9\U0001f3fd\u200d\u2642|\u26f9\U0001f3fe\u200d\u2642|\u26f9\U0001f3ff\u200d\u2642|\u26f9\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2640|\u26f9\U0001f3fb\u200d\u2640|\u26f9\U0001f3fc\u200d\u2640|\u26f9\U0001f3fd\u200d\u2640|\u26f9\U0001f3fe\u200d\u2640|\u26f9\U0001f3ff\u200d\u2640|\U0001f3cb\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2642|\U0001f3cb\U0001f3fb\u200d\u2642|\U0001f3cb\U0001f3fc\u200d\u2642|\U0001f3cb\U0001f3fd\u200d\u2642|\U0001f3cb\U0001f3fe\u200d\u2642|\U0001f3cb\U0001f3ff\u200d\u2642|\U0001f3cb\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2640|\U0001f3cb\U0001f3fb\u200d\u2640|\U0001f3cb\U0001f3fc\u200d\u2640|\U0001f3cb\U0001f3fd\u200d\u2640|\U0001f3cb\U0001f3fe\u200d\u2640|\U0001f3cb\U0001f3ff\u200d\u2640|\U0001f6b4\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642|\U0001f6b4\U0001f3fc\u200d\u2642|\U0001f6b4\U0001f3fd\u200d\u2642|\U0001f6b4\U0001f3fe\u200d\u2642|\U0001f6b4\U0001f3ff\u200d\u2642|\U0001f6b4\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640|\U0001f6b4\U0001f3fc\u200d\u2640|\U0001f6b4\U0001f3fd\u200d\u2640|\U0001f6b4\U0001f3fe\u200d\u2640|\U0001f6b4\U0001f3ff\u200d\u2640|\U0001f6b5\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642|\U0001f6b5\U0001f3fc\u200d\u2642|\U0001f6b5\U0001f3fd\u200d\u2642|\U0001f6b5\U0001f3fe\u200d\u2642|\U0001f6b5\U0001f3ff\u200d\u2642|\U0001f6b5\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640|\U0001f6b5\U0001f3fc\u200d\u2640|\U0001f6b5\U0001f3fd\u200d\u2640|\U0001f6b5\U0001f3fe\u200d\u2640|\U0001f6b5\U0001f3ff\u200d\u2640|\U0001f938\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2642|\U0001f938\U0001f3fc\u200d\u2642|\U0001f938\U0001f3fd\u200d\u2642|\U0001f938\U0001f3fe\u200d\u2642|\U0001f938\U0001f3ff\u200d\u2642|\U0001f938\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2640|\U0001f938\U0001f3fc\u200d\u2640|\U0001f938\U0001f3fd\u200d\u2640|\U0001f938\U0001f3fe\u200d\u2640|\U0001f938\U0001f3ff\u200d\u2640|\U0001f93c\u200d\u2642\ufe0f|\U0001f93c\u200d\u2640\ufe0f|\U0001f93d\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642|\U0001f93d\U0001f3fc\u200d\u2642|\U0001f93d\U0001f3fd\u200d\u2642|\U0001f93d\U0001f3fe\u200d\u2642|\U0001f93d\U0001f3ff\u200d\u2642|\U0001f93d\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640|\U0001f93d\U0001f3fc\u200d\u2640|\U0001f93d\U0001f3fd\u200d\u2640|\U0001f93d\U0001f3fe\u200d\u2640|\U0001f93d\U0001f3ff\u200d\u2640|\U0001f93e\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642|\U0001f93e\U0001f3fc\u200d\u2642|\U0001f93e\U0001f3fd\u200d\u2642|\U0001f93e\U0001f3fe\u200d\u2642|\U0001f93e\U0001f3ff\u200d\u2642|\U0001f93e\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640|\U0001f93e\U0001f3fc\u200d\u2640|\U0001f93e\U0001f3fd\u200d\u2640|\U0001f93e\U0001f3fe\u200d\u2640|\U0001f93e\U0001f3ff\u200d\u2640|\U0001f939\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2642|\U0001f939\U0001f3fc\u200d\u2642|\U0001f939\U0001f3fd\u200d\u2642|\U0001f939\U0001f3fe\u200d\u2642|\U0001f939\U0001f3ff\u200d\u2642|\U0001f939\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2640|\U0001f939\U0001f3fc\u200d\u2640|\U0001f939\U0001f3fd\u200d\u2640|\U0001f939\U0001f3fe\u200d\u2640|\U0001f939\U0001f3ff\u200d\u2640|\U0001f9d8\
u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642|\U0001f9d8\U0001f3fc\u200d\u2642|\U0001f9d8\U0001f3fd\u200d\u2642|\U0001f9d8\U0001f3fe\u200d\u2642|\U0001f9d8\U0001f3ff\u200d\u2642|\U0001f9d8\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640|\U0001f9d8\U0001f3fc\u200d\u2640|\U0001f9d8\U0001f3fd\u200d\u2640|\U0001f9d8\U0001f3fe\u200d\u2640|\U0001f9d8\U0001f3ff\u200d\u2640|\U0001f3f3\ufe0f\u200d\U0001f308|\U0001f3f4\u200d\u2620\ufe0f|\U0001f441\u200d\U0001f5e8|\U0001f471\u200d\u2642|\U0001f468\u200d\U0001f9b0|\U0001f468\u200d\U0001f9b1|\U0001f468\u200d\U0001f9b3|\U0001f468\u200d\U0001f9b2|\U0001f471\u200d\u2640|\U0001f469\u200d\U0001f9b0|\U0001f469\u200d\U0001f9b1|\U0001f469\u200d\U0001f9b3|\U0001f469\u200d\U0001f9b2|\U0001f64d\u200d\u2642|\U0001f64d\u200d\u2640|\U0001f64e\u200d\u2642|\U0001f64e\u200d\u2640|\U0001f645\u200d\u2642|\U0001f645\u200d\u2640|\U0001f646\u200d\u2642|\U0001f646\u200d\u2640|\U0001f481\u200d\u2642|\U0001f481\u200d\u2640|\U0001f64b\u200d\u2642|\U0001f64b\u200d\u2640|\U0001f9cf\u200d\u2642|\U0001f9cf\u200d\u2640|\U0001f647\u200d\u2642|\U0001f647\u200d\u2640|\U0001f926\u200d\u2642|\U0001f926\u200d\u2640|\U0001f937\u200d\u2642|\U0001f937\u200d\u2640|\U0001f468\u200d\u2695|\U0001f469\u200d\u2695|\U0001f468\u200d\U0001f393|\U0001f469\u200d\U0001f393|\U0001f468\u200d\U0001f3eb|\U0001f469\u200d\U0001f3eb|\U0001f468\u200d\u2696|\U0001f469\u200d\u2696|\U0001f468\u200d\U0001f33e|\U0001f469\u200d\U0001f33e|\U0001f468\u200d\U0001f373|\U0001f469\u200d\U0001f373|\U0001f468\u200d\U0001f527|\U0001f469\u200d\U0001f527|\U0001f468\u200d\U0001f3ed|\U0001f469\u200d\U0001f3ed|\U0001f468\u200d\U0001f4bc|\U0001f469\u200d\U0001f4bc|\U0001f468\u200d\U0001f52c|\U0001f469\u200d\U0001f52c|\U0001f468\u200d\U0001f4bb|\U0001f469\u200d\U0001f4bb|\U0001f468\u200d\U0001f3a4|\U0001f469\u200d\U0001f3a4|\U0001f468\u200d\U0001f3a8|\U0001f469\u200d\U0001f3a8|\U0001f468\u200d\u2708|\U0001f469\u200d\u2708|\U0001f468\u200d\U0001f680|\U0001f469\u200d\U0001f680|\U0001f468\u200d\U0001f692|\U0001f469\u200d\U0001f692|\U0001f46e\u200d\u2642|\U0001f46e\u200d\u2640|\U0001f575\u200d\u2642|\U0001f575\u200d\u2640|\U0001f482\u200d\u2642|\U0001f482\u200d\u2640|\U0001f477\u200d\u2642|\U0001f477\u200d\u2640|\U0001f473\u200d\u2642|\U0001f473\u200d\u2640|\U0001f9b8\u200d\u2642|\U0001f9b8\u200d\u2640|\U0001f9b9\u200d\u2642|\U0001f9b9\u200d\u2640|\U0001f9d9\u200d\u2642|\U0001f9d9\u200d\u2640|\U0001f9da\u200d\u2642|\U0001f9da\u200d\u2640|\U0001f9db\u200d\u2642|\U0001f9db\u200d\u2640|\U0001f9dc\u200d\u2642|\U0001f9dc\u200d\u2640|\U0001f9dd\u200d\u2642|\U0001f9dd\u200d\u2640|\U0001f9de\u200d\u2642|\U0001f9de\u200d\u2640|\U0001f9df\u200d\u2642|\U0001f9df\u200d\u2640|\U0001f486\u200d\u2642|\U0001f486\u200d\u2640|\U0001f487\u200d\u2642|\U0001f487\u200d\u2640|\U0001f6b6\u200d\u2642|\U0001f6b6\u200d\u2640|\U0001f9cd\u200d\u2642|\U0001f9cd\u200d\u2640|\U0001f9ce\u200d\u2642|\U0001f9ce\u200d\u2640|\U0001f468\u200d\U0001f9af|\U0001f469\u200d\U0001f9af|\U0001f468\u200d\U0001f9bc|\U0001f469\u200d\U0001f9bc|\U0001f468\u200d\U0001f9bd|\U0001f469\u200d\U0001f9bd|\U0001f3c3\u200d\u2642|\U0001f3c3\u200d\u2640|\U0001f46f\u200d\u2642|\U0001f46f\u200d\u2640|\U0001f9d6\u200d\u2642|\U0001f9d6\u200d\u2640|\U0001f9d7\u200d\u2642|\U0001f9d7\u200d\u2640|\U0001f3cc\u200d\u2642|\U0001f3cc\u200d\u2640|\U0001f3c4\u200d\u2642|\U0001f3c4\u200d\u2640|\U0001f6a3\u200d\u2642|\U0001f6a3\u200d\u2640|\U0001f3ca\u200d\u2642|\U0001f3ca\u200d\u2640|\u26f9\u200d\u2642|\u26f9\u200d\u2640|\U0001f3cb\u200d\u2642|\U0001f3cb\u200d\u2640|\U0001f6b4\u200d\u2642|\U0001f6b4\u200d
\u2640|\U0001f6b5\u200d\u2642|\U0001f6b5\u200d\u2640|\U0001f938\u200d\u2642|\U0001f938\u200d\u2640|\U0001f93c\u200d\u2642|\U0001f93c\u200d\u2640|\U0001f93d\u200d\u2642|\U0001f93d\u200d\u2640|\U0001f93e\u200d\u2642|\U0001f93e\u200d\u2640|\U0001f939\u200d\u2642|\U0001f939\u200d\u2640|\U0001f9d8\u200d\u2642|\U0001f9d8\u200d\u2640|\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f467|\U0001f415\u200d\U0001f9ba|\\#\ufe0f\u20e3|\\*\ufe0f\u20e3|0\ufe0f\u20e3|1\ufe0f\u20e3|2\ufe0f\u20e3|3\ufe0f\u20e3|4\ufe0f\u20e3|5\ufe0f\u20e3|6\ufe0f\u20e3|7\ufe0f\u20e3|8\ufe0f\u20e3|9\ufe0f\u20e3|\U0001f3f3\u200d\U0001f308|\U0001f3f4\u200d\u2620|\u263a\ufe0f|\u2639\ufe0f|\u2620\ufe0f|\u2763\ufe0f|\u2764\ufe0f|\U0001f573\ufe0f|\U0001f5e8\ufe0f|\U0001f5ef\ufe0f|\U0001f44b\U0001f3fb|\U0001f44b\U0001f3fc|\U0001f44b\U0001f3fd|\U0001f44b\U0001f3fe|\U0001f44b\U0001f3ff|\U0001f91a\U0001f3fb|\U0001f91a\U0001f3fc|\U0001f91a\U0001f3fd|\U0001f91a\U0001f3fe|\U0001f91a\U0001f3ff|\U0001f590\ufe0f|\U0001f590\U0001f3fb|\U0001f590\U0001f3fc|\U0001f590\U0001f3fd|\U0001f590\U0001f3fe|\U0001f590\U0001f3ff|\u270b\U0001f3fb|\u270b\U0001f3fc|\u270b\U0001f3fd|\u270b\U0001f3fe|\u270b\U0001f3ff|\U0001f596\U0001f3fb|\U0001f596\U0001f3fc|\U0001f596\U0001f3fd|\U0001f596\U0001f3fe|\U0001f596\U0001f3ff|\U0001f44c\U0001f3fb|\U0001f44c\U0001f3fc|\U0001f44c\U0001f3fd|\U0001f44c\U0001f3fe|\U0001f44c\U0001f3ff|\U0001f90f\U0001f3fb|\U0001f90f\U0001f3fc|\U0001f90f\U0001f3fd|\U0001f90f\U0001f3fe|\U0001f90f\U0001f3ff|\u270c\ufe0f|\u270c\U0001f3fb|\u270c\U0001f3fc|\u270c\U0001f3fd|\u270c\U0001f3fe|\u270c\U0001f3ff|\U0001f91e\U0001f3fb|\U0001f91e\U0001f3fc|\U0001f91e\U0001f3fd|\U0001f91e\U0001f3fe|\U0001f91e\U0001f3ff|\U0001f91f\U0001f3fb|\U0001f91f\U0001f3fc|\U0001f91f\U0001f3fd|\U0001f91f\U0001f3fe|\U0001f91f\U0001f3ff|\U0001f918\U0001f3fb|\U0001f918\U0001f3fc|\U0001f918\U0001f3fd|\U0001f918\U0001f3fe|\U0001f918\U0001f3ff|\U0001f919\U0001f3fb|\U0001f919\U0001f3fc|\U0001f919\U0001f3fd|\U0001f919\U0001f3fe|\U0001f919\U0001f3ff|\U0001f448\U0001f3fb|\U0001f448\U0001f3fc|\U0001f448\U0001f3fd|\U0001f448\U0001f3fe|\U0001f448\U0001f3ff|\U0001f449\U0001f3fb|\U0001f449\U0001f3fc|\U0001f449\U0001f3fd|\U0001f449\U0001f3fe|\U0001f449\U0001f3ff|\U0001f446\U0001f3fb|\U0001f446\U0001f3fc|\U0001f446\U0001f3fd|\U0001f446\U0001f3fe|\U0001f446\U0001f3ff|\U0001f595\U0001f3fb|\U0001f595\U0001f3fc|\U0001f595\U0001f3fd|\U0001f595\U0001f3fe|\U0001f595\U0001f3ff|\U0001f447\U0001f3fb|\U0001f447\U0001f3fc|\U0001f447\U0001f3fd|\U0001f447\U0001f3fe|\U0001f447\U0001f3ff|\u261d\ufe0f|\u261d\U0001f3fb|\u261d\U0001f3fc|\u261d\U0001f3fd|\u261d\U0001f3fe|\u261d\U0001f3ff|\U0001f44d\U0001f3fb|\U0001f44d\U0001f3fc|\U0001f44d\U0001f3fd|\U0001f44d\U0001f3fe|\U0001f44d\U0001f3ff|\U0001f44e\U0001f3fb|\U0001f44e\U0001f3fc|\U0001f44e\U0001f3fd|\U0001f44e\U0001f3fe|\U0001f44e\U0001f3ff|\u270a\U0001f3fb|\u270a\U0001f3fc|\u270a\U0001f3fd|\u270a\U0001f3fe|\u270a\U0001f3ff|\U0001f44a\U0001f3fb|\U0001f44a\U0001f3fc|\U0001f44a\U0001f3fd|\U0001f44a\U0001f3fe|\U0001f44a\U0001f3ff|\U0001f91b\U0001f3fb|\U0001f91b\U0001f3fc|\U0001f91b\U0001f3fd|\U0001f91b\U0001f3fe|\U0001f91b\U0001f3ff|\U0001f91c\U0001f3fb|\U0001f91c\U0001f3fc|\U0001f91c\U0001f3fd|\U0001f91c\U0001f3fe|\U0001f91c\U0001f3ff|\U0001f44f\U0001f3fb|\U0001f44f\U0001f3fc|\U0001f44f\U0001f3fd|\U0001f44f\U0001f3fe|\U0001f44f\U0001f3ff|\U0001f64c\U0001f3fb|\U0001f64c\U0001f3fc|\U0001f64c\U0001f3fd|\U0001f64c\U0001f3fe|\U0001f64c\U0001f3ff|\U0001f450\U0001f3fb|\U0001f450\U0001f3fc|\U0001f450\U0001f3
fd|\U0001f450\U0001f3fe|\U0001f450\U0001f3ff|\U0001f932\U0001f3fb|\U0001f932\U0001f3fc|\U0001f932\U0001f3fd|\U0001f932\U0001f3fe|\U0001f932\U0001f3ff|\U0001f64f\U0001f3fb|\U0001f64f\U0001f3fc|\U0001f64f\U0001f3fd|\U0001f64f\U0001f3fe|\U0001f64f\U0001f3ff|\u270d\ufe0f|\u270d\U0001f3fb|\u270d\U0001f3fc|\u270d\U0001f3fd|\u270d\U0001f3fe|\u270d\U0001f3ff|\U0001f485\U0001f3fb|\U0001f485\U0001f3fc|\U0001f485\U0001f3fd|\U0001f485\U0001f3fe|\U0001f485\U0001f3ff|\U0001f933\U0001f3fb|\U0001f933\U0001f3fc|\U0001f933\U0001f3fd|\U0001f933\U0001f3fe|\U0001f933\U0001f3ff|\U0001f4aa\U0001f3fb|\U0001f4aa\U0001f3fc|\U0001f4aa\U0001f3fd|\U0001f4aa\U0001f3fe|\U0001f4aa\U0001f3ff|\U0001f9b5\U0001f3fb|\U0001f9b5\U0001f3fc|\U0001f9b5\U0001f3fd|\U0001f9b5\U0001f3fe|\U0001f9b5\U0001f3ff|\U0001f9b6\U0001f3fb|\U0001f9b6\U0001f3fc|\U0001f9b6\U0001f3fd|\U0001f9b6\U0001f3fe|\U0001f9b6\U0001f3ff|\U0001f442\U0001f3fb|\U0001f442\U0001f3fc|\U0001f442\U0001f3fd|\U0001f442\U0001f3fe|\U0001f442\U0001f3ff|\U0001f9bb\U0001f3fb|\U0001f9bb\U0001f3fc|\U0001f9bb\U0001f3fd|\U0001f9bb\U0001f3fe|\U0001f9bb\U0001f3ff|\U0001f443\U0001f3fb|\U0001f443\U0001f3fc|\U0001f443\U0001f3fd|\U0001f443\U0001f3fe|\U0001f443\U0001f3ff|\U0001f441\ufe0f|\U0001f476\U0001f3fb|\U0001f476\U0001f3fc|\U0001f476\U0001f3fd|\U0001f476\U0001f3fe|\U0001f476\U0001f3ff|\U0001f9d2\U0001f3fb|\U0001f9d2\U0001f3fc|\U0001f9d2\U0001f3fd|\U0001f9d2\U0001f3fe|\U0001f9d2\U0001f3ff|\U0001f466\U0001f3fb|\U0001f466\U0001f3fc|\U0001f466\U0001f3fd|\U0001f466\U0001f3fe|\U0001f466\U0001f3ff|\U0001f467\U0001f3fb|\U0001f467\U0001f3fc|\U0001f467\U0001f3fd|\U0001f467\U0001f3fe|\U0001f467\U0001f3ff|\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff|\U0001f471\U0001f3fb|\U0001f471\U0001f3fc|\U0001f471\U0001f3fd|\U0001f471\U0001f3fe|\U0001f471\U0001f3ff|\U0001f468\U0001f3fb|\U0001f468\U0001f3fc|\U0001f468\U0001f3fd|\U0001f468\U0001f3fe|\U0001f468\U0001f3ff|\U0001f9d4\U0001f3fb|\U0001f9d4\U0001f3fc|\U0001f9d4\U0001f3fd|\U0001f9d4\U0001f3fe|\U0001f9d4\U0001f3ff|\U0001f469\U0001f3fb|\U0001f469\U0001f3fc|\U0001f469\U0001f3fd|\U0001f469\U0001f3fe|\U0001f469\U0001f3ff|\U0001f9d3\U0001f3fb|\U0001f9d3\U0001f3fc|\U0001f9d3\U0001f3fd|\U0001f9d3\U0001f3fe|\U0001f9d3\U0001f3ff|\U0001f474\U0001f3fb|\U0001f474\U0001f3fc|\U0001f474\U0001f3fd|\U0001f474\U0001f3fe|\U0001f474\U0001f3ff|\U0001f475\U0001f3fb|\U0001f475\U0001f3fc|\U0001f475\U0001f3fd|\U0001f475\U0001f3fe|\U0001f475\U0001f3ff|\U0001f64d\U0001f3fb|\U0001f64d\U0001f3fc|\U0001f64d\U0001f3fd|\U0001f64d\U0001f3fe|\U0001f64d\U0001f3ff|\U0001f64e\U0001f3fb|\U0001f64e\U0001f3fc|\U0001f64e\U0001f3fd|\U0001f64e\U0001f3fe|\U0001f64e\U0001f3ff|\U0001f645\U0001f3fb|\U0001f645\U0001f3fc|\U0001f645\U0001f3fd|\U0001f645\U0001f3fe|\U0001f645\U0001f3ff|\U0001f646\U0001f3fb|\U0001f646\U0001f3fc|\U0001f646\U0001f3fd|\U0001f646\U0001f3fe|\U0001f646\U0001f3ff|\U0001f481\U0001f3fb|\U0001f481\U0001f3fc|\U0001f481\U0001f3fd|\U0001f481\U0001f3fe|\U0001f481\U0001f3ff|\U0001f64b\U0001f3fb|\U0001f64b\U0001f3fc|\U0001f64b\U0001f3fd|\U0001f64b\U0001f3fe|\U0001f64b\U0001f3ff|\U0001f9cf\U0001f3fb|\U0001f9cf\U0001f3fc|\U0001f9cf\U0001f3fd|\U0001f9cf\U0001f3fe|\U0001f9cf\U0001f3ff|\U0001f647\U0001f3fb|\U0001f647\U0001f3fc|\U0001f647\U0001f3fd|\U0001f647\U0001f3fe|\U0001f647\U0001f3ff|\U0001f926\U0001f3fb|\U0001f926\U0001f3fc|\U0001f926\U0001f3fd|\U0001f926\U0001f3fe|\U0001f926\U0001f3ff|\U0001f937\U0001f3fb|\U0001f937\U0001f3fc|\U0001f937\U0001f3fd|\U0001f937\U0001f3fe|\U0001f937\U0001f3ff|\U0001f46e\U0001f3fb|\U0001f46e\U00
01f3fc|\U0001f46e\U0001f3fd|\U0001f46e\U0001f3fe|\U0001f46e\U0001f3ff|\U0001f575\ufe0f|\U0001f575\U0001f3fb|\U0001f575\U0001f3fc|\U0001f575\U0001f3fd|\U0001f575\U0001f3fe|\U0001f575\U0001f3ff|\U0001f482\U0001f3fb|\U0001f482\U0001f3fc|\U0001f482\U0001f3fd|\U0001f482\U0001f3fe|\U0001f482\U0001f3ff|\U0001f477\U0001f3fb|\U0001f477\U0001f3fc|\U0001f477\U0001f3fd|\U0001f477\U0001f3fe|\U0001f477\U0001f3ff|\U0001f934\U0001f3fb|\U0001f934\U0001f3fc|\U0001f934\U0001f3fd|\U0001f934\U0001f3fe|\U0001f934\U0001f3ff|\U0001f478\U0001f3fb|\U0001f478\U0001f3fc|\U0001f478\U0001f3fd|\U0001f478\U0001f3fe|\U0001f478\U0001f3ff|\U0001f473\U0001f3fb|\U0001f473\U0001f3fc|\U0001f473\U0001f3fd|\U0001f473\U0001f3fe|\U0001f473\U0001f3ff|\U0001f472\U0001f3fb|\U0001f472\U0001f3fc|\U0001f472\U0001f3fd|\U0001f472\U0001f3fe|\U0001f472\U0001f3ff|\U0001f9d5\U0001f3fb|\U0001f9d5\U0001f3fc|\U0001f9d5\U0001f3fd|\U0001f9d5\U0001f3fe|\U0001f9d5\U0001f3ff|\U0001f935\U0001f3fb|\U0001f935\U0001f3fc|\U0001f935\U0001f3fd|\U0001f935\U0001f3fe|\U0001f935\U0001f3ff|\U0001f470\U0001f3fb|\U0001f470\U0001f3fc|\U0001f470\U0001f3fd|\U0001f470\U0001f3fe|\U0001f470\U0001f3ff|\U0001f930\U0001f3fb|\U0001f930\U0001f3fc|\U0001f930\U0001f3fd|\U0001f930\U0001f3fe|\U0001f930\U0001f3ff|\U0001f931\U0001f3fb|\U0001f931\U0001f3fc|\U0001f931\U0001f3fd|\U0001f931\U0001f3fe|\U0001f931\U0001f3ff|\U0001f47c\U0001f3fb|\U0001f47c\U0001f3fc|\U0001f47c\U0001f3fd|\U0001f47c\U0001f3fe|\U0001f47c\U0001f3ff|\U0001f385\U0001f3fb|\U0001f385\U0001f3fc|\U0001f385\U0001f3fd|\U0001f385\U0001f3fe|\U0001f385\U0001f3ff|\U0001f936\U0001f3fb|\U0001f936\U0001f3fc|\U0001f936\U0001f3fd|\U0001f936\U0001f3fe|\U0001f936\U0001f3ff|\U0001f9b8\U0001f3fb|\U0001f9b8\U0001f3fc|\U0001f9b8\U0001f3fd|\U0001f9b8\U0001f3fe|\U0001f9b8\U0001f3ff|\U0001f9b9\U0001f3fb|\U0001f9b9\U0001f3fc|\U0001f9b9\U0001f3fd|\U0001f9b9\U0001f3fe|\U0001f9b9\U0001f3ff|\U0001f9d9\U0001f3fb|\U0001f9d9\U0001f3fc|\U0001f9d9\U0001f3fd|\U0001f9d9\U0001f3fe|\U0001f9d9\U0001f3ff|\U0001f9da\U0001f3fb|\U0001f9da\U0001f3fc|\U0001f9da\U0001f3fd|\U0001f9da\U0001f3fe|\U0001f9da\U0001f3ff|\U0001f9db\U0001f3fb|\U0001f9db\U0001f3fc|\U0001f9db\U0001f3fd|\U0001f9db\U0001f3fe|\U0001f9db\U0001f3ff|\U0001f9dc\U0001f3fb|\U0001f9dc\U0001f3fc|\U0001f9dc\U0001f3fd|\U0001f9dc\U0001f3fe|\U0001f9dc\U0001f3ff|\U0001f9dd\U0001f3fb|\U0001f9dd\U0001f3fc|\U0001f9dd\U0001f3fd|\U0001f9dd\U0001f3fe|\U0001f9dd\U0001f3ff|\U0001f486\U0001f3fb|\U0001f486\U0001f3fc|\U0001f486\U0001f3fd|\U0001f486\U0001f3fe|\U0001f486\U0001f3ff|\U0001f487\U0001f3fb|\U0001f487\U0001f3fc|\U0001f487\U0001f3fd|\U0001f487\U0001f3fe|\U0001f487\U0001f3ff|\U0001f6b6\U0001f3fb|\U0001f6b6\U0001f3fc|\U0001f6b6\U0001f3fd|\U0001f6b6\U0001f3fe|\U0001f6b6\U0001f3ff|\U0001f9cd\U0001f3fb|\U0001f9cd\U0001f3fc|\U0001f9cd\U0001f3fd|\U0001f9cd\U0001f3fe|\U0001f9cd\U0001f3ff|\U0001f9ce\U0001f3fb|\U0001f9ce\U0001f3fc|\U0001f9ce\U0001f3fd|\U0001f9ce\U0001f3fe|\U0001f9ce\U0001f3ff|\U0001f3c3\U0001f3fb|\U0001f3c3\U0001f3fc|\U0001f3c3\U0001f3fd|\U0001f3c3\U0001f3fe|\U0001f3c3\U0001f3ff|\U0001f483\U0001f3fb|\U0001f483\U0001f3fc|\U0001f483\U0001f3fd|\U0001f483\U0001f3fe|\U0001f483\U0001f3ff|\U0001f57a\U0001f3fb|\U0001f57a\U0001f3fc|\U0001f57a\U0001f3fd|\U0001f57a\U0001f3fe|\U0001f57a\U0001f3ff|\U0001f574\ufe0f|\U0001f574\U0001f3fb|\U0001f574\U0001f3fc|\U0001f574\U0001f3fd|\U0001f574\U0001f3fe|\U0001f574\U0001f3ff|\U0001f9d6\U0001f3fb|\U0001f9d6\U0001f3fc|\U0001f9d6\U0001f3fd|\U0001f9d6\U0001f3fe|\U0001f9d6\U0001f3ff|\U0001f9d7\U0001f3fb|\U0001f9d7\U0001f3fc|\U0001f9d7\U0001f3fd|\U0001f9d7\U0001f3fe|\U0001f
9d7\U0001f3ff|\U0001f3c7\U0001f3fb|\U0001f3c7\U0001f3fc|\U0001f3c7\U0001f3fd|\U0001f3c7\U0001f3fe|\U0001f3c7\U0001f3ff|\u26f7\ufe0f|\U0001f3c2\U0001f3fb|\U0001f3c2\U0001f3fc|\U0001f3c2\U0001f3fd|\U0001f3c2\U0001f3fe|\U0001f3c2\U0001f3ff|\U0001f3cc\ufe0f|\U0001f3cc\U0001f3fb|\U0001f3cc\U0001f3fc|\U0001f3cc\U0001f3fd|\U0001f3cc\U0001f3fe|\U0001f3cc\U0001f3ff|\U0001f3c4\U0001f3fb|\U0001f3c4\U0001f3fc|\U0001f3c4\U0001f3fd|\U0001f3c4\U0001f3fe|\U0001f3c4\U0001f3ff|\U0001f6a3\U0001f3fb|\U0001f6a3\U0001f3fc|\U0001f6a3\U0001f3fd|\U0001f6a3\U0001f3fe|\U0001f6a3\U0001f3ff|\U0001f3ca\U0001f3fb|\U0001f3ca\U0001f3fc|\U0001f3ca\U0001f3fd|\U0001f3ca\U0001f3fe|\U0001f3ca\U0001f3ff|\u26f9\ufe0f|\u26f9\U0001f3fb|\u26f9\U0001f3fc|\u26f9\U0001f3fd|\u26f9\U0001f3fe|\u26f9\U0001f3ff|\U0001f3cb\ufe0f|\U0001f3cb\U0001f3fb|\U0001f3cb\U0001f3fc|\U0001f3cb\U0001f3fd|\U0001f3cb\U0001f3fe|\U0001f3cb\U0001f3ff|\U0001f6b4\U0001f3fb|\U0001f6b4\U0001f3fc|\U0001f6b4\U0001f3fd|\U0001f6b4\U0001f3fe|\U0001f6b4\U0001f3ff|\U0001f6b5\U0001f3fb|\U0001f6b5\U0001f3fc|\U0001f6b5\U0001f3fd|\U0001f6b5\U0001f3fe|\U0001f6b5\U0001f3ff|\U0001f938\U0001f3fb|\U0001f938\U0001f3fc|\U0001f938\U0001f3fd|\U0001f938\U0001f3fe|\U0001f938\U0001f3ff|\U0001f93d\U0001f3fb|\U0001f93d\U0001f3fc|\U0001f93d\U0001f3fd|\U0001f93d\U0001f3fe|\U0001f93d\U0001f3ff|\U0001f93e\U0001f3fb|\U0001f93e\U0001f3fc|\U0001f93e\U0001f3fd|\U0001f93e\U0001f3fe|\U0001f93e\U0001f3ff|\U0001f939\U0001f3fb|\U0001f939\U0001f3fc|\U0001f939\U0001f3fd|\U0001f939\U0001f3fe|\U0001f939\U0001f3ff|\U0001f9d8\U0001f3fb|\U0001f9d8\U0001f3fc|\U0001f9d8\U0001f3fd|\U0001f9d8\U0001f3fe|\U0001f9d8\U0001f3ff|\U0001f6c0\U0001f3fb|\U0001f6c0\U0001f3fc|\U0001f6c0\U0001f3fd|\U0001f6c0\U0001f3fe|\U0001f6c0\U0001f3ff|\U0001f6cc\U0001f3fb|\U0001f6cc\U0001f3fc|\U0001f6cc\U0001f3fd|\U0001f6cc\U0001f3fe|\U0001f6cc\U0001f3ff|\U0001f46d\U0001f3fb|\U0001f46d\U0001f3fc|\U0001f46d\U0001f3fd|\U0001f46d\U0001f3fe|\U0001f46d\U0001f3ff|\U0001f46b\U0001f3fb|\U0001f46b\U0001f3fc|\U0001f46b\U0001f3fd|\U0001f46b\U0001f3fe|\U0001f46b\U0001f3ff|\U0001f46c\U0001f3fb|\U0001f46c\U0001f3fc|\U0001f46c\U0001f3fd|\U0001f46c\U0001f3fe|\U0001f46c\U0001f3ff|\U0001f5e3\ufe0f|\U0001f43f\ufe0f|\U0001f54a\ufe0f|\U0001f577\ufe0f|\U0001f578\ufe0f|\U0001f3f5\ufe0f|\u2618\ufe0f|\U0001f336\ufe0f|\U0001f37d\ufe0f|\U0001f5fa\ufe0f|\U0001f3d4\ufe0f|\u26f0\ufe0f|\U0001f3d5\ufe0f|\U0001f3d6\ufe0f|\U0001f3dc\ufe0f|\U0001f3dd\ufe0f|\U0001f3de\ufe0f|\U0001f3df\ufe0f|\U0001f3db\ufe0f|\U0001f3d7\ufe0f|\U0001f3d8\ufe0f|\U0001f3da\ufe0f|\u26e9\ufe0f|\U0001f3d9\ufe0f|\u2668\ufe0f|\U0001f3ce\ufe0f|\U0001f3cd\ufe0f|\U0001f6e3\ufe0f|\U0001f6e4\ufe0f|\U0001f6e2\ufe0f|\U0001f6f3\ufe0f|\u26f4\ufe0f|\U0001f6e5\ufe0f|\u2708\ufe0f|\U0001f6e9\ufe0f|\U0001f6f0\ufe0f|\U0001f6ce\ufe0f|\u23f1\ufe0f|\u23f2\ufe0f|\U0001f570\ufe0f|\U0001f321\ufe0f|\u2600\ufe0f|\u2601\ufe0f|\u26c8\ufe0f|\U0001f324\ufe0f|\U0001f325\ufe0f|\U0001f326\ufe0f|\U0001f327\ufe0f|\U0001f328\ufe0f|\U0001f329\ufe0f|\U0001f32a\ufe0f|\U0001f32b\ufe0f|\U0001f32c\ufe0f|\u2602\ufe0f|\u26f1\ufe0f|\u2744\ufe0f|\u2603\ufe0f|\u2604\ufe0f|\U0001f397\ufe0f|\U0001f39f\ufe0f|\U0001f396\ufe0f|\u26f8\ufe0f|\U0001f579\ufe0f|\u2660\ufe0f|\u2665\ufe0f|\u2666\ufe0f|\u2663\ufe0f|\u265f\ufe0f|\U0001f5bc\ufe0f|\U0001f576\ufe0f|\U0001f6cd\ufe0f|\u26d1\ufe0f|\U0001f399\ufe0f|\U0001f39a\ufe0f|\U0001f39b\ufe0f|\u260e\ufe0f|\U0001f5a5\ufe0f|\U0001f5a8\ufe0f|\u2328\ufe0f|\U0001f5b1\ufe0f|\U0001f5b2\ufe0f|\U0001f39e\ufe0f|\U0001f4fd\ufe0f|\U0001f56f\ufe0f|\U0001f5de\ufe0f|\U0001f3f7\ufe0f|\u2709\ufe0f|\U0001f5f3\ufe0f|\u270f\uf
e0f|\u2712\ufe0f|\U0001f58b\ufe0f|\U0001f58a\ufe0f|\U0001f58c\ufe0f|\U0001f58d\ufe0f|\U0001f5c2\ufe0f|\U0001f5d2\ufe0f|\U0001f5d3\ufe0f|\U0001f587\ufe0f|\u2702\ufe0f|\U0001f5c3\ufe0f|\U0001f5c4\ufe0f|\U0001f5d1\ufe0f|\U0001f5dd\ufe0f|\u26cf\ufe0f|\u2692\ufe0f|\U0001f6e0\ufe0f|\U0001f5e1\ufe0f|\u2694\ufe0f|\U0001f6e1\ufe0f|\u2699\ufe0f|\U0001f5dc\ufe0f|\u2696\ufe0f|\u26d3\ufe0f|\u2697\ufe0f|\U0001f6cf\ufe0f|\U0001f6cb\ufe0f|\u26b0\ufe0f|\u26b1\ufe0f|\u26a0\ufe0f|\u2622\ufe0f|\u2623\ufe0f|\u2b06\ufe0f|\u2197\ufe0f|\u27a1\ufe0f|\u2198\ufe0f|\u2b07\ufe0f|\u2199\ufe0f|\u2b05\ufe0f|\u2196\ufe0f|\u2195\ufe0f|\u2194\ufe0f|\u21a9\ufe0f|\u21aa\ufe0f|\u2934\ufe0f|\u2935\ufe0f|\u269b\ufe0f|\U0001f549\ufe0f|\u2721\ufe0f|\u2638\ufe0f|\u262f\ufe0f|\u271d\ufe0f|\u2626\ufe0f|\u262a\ufe0f|\u262e\ufe0f|\u25b6\ufe0f|\u23ed\ufe0f|\u23ef\ufe0f|\u25c0\ufe0f|\u23ee\ufe0f|\u23f8\ufe0f|\u23f9\ufe0f|\u23fa\ufe0f|\u23cf\ufe0f|\u2640\ufe0f|\u2642\ufe0f|\u2695\ufe0f|\u267e\ufe0f|\u267b\ufe0f|\u269c\ufe0f|\u2611\ufe0f|\u2714\ufe0f|\u2716\ufe0f|\u303d\ufe0f|\u2733\ufe0f|\u2734\ufe0f|\u2747\ufe0f|\u203c\ufe0f|\u2049\ufe0f|\u3030\ufe0f|\xa9\ufe0f|\xae\ufe0f|\u2122\ufe0f|\\#\u20e3|\\*\u20e3|0\u20e3|1\u20e3|2\u20e3|3\u20e3|4\u20e3|5\u20e3|6\u20e3|7\u20e3|8\u20e3|9\u20e3|\U0001f170\ufe0f|\U0001f171\ufe0f|\u2139\ufe0f|\u24c2\ufe0f|\U0001f17e\ufe0f|\U0001f17f\ufe0f|\U0001f202\ufe0f|\U0001f237\ufe0f|\u3297\ufe0f|\u3299\ufe0f|\u25fc\ufe0f|\u25fb\ufe0f|\u25aa\ufe0f|\u25ab\ufe0f|\U0001f3f3\ufe0f|\U0001f1e6\U0001f1e8|\U0001f1e6\U0001f1e9|\U0001f1e6\U0001f1ea|\U0001f1e6\U0001f1eb|\U0001f1e6\U0001f1ec|\U0001f1e6\U0001f1ee|\U0001f1e6\U0001f1f1|\U0001f1e6\U0001f1f2|\U0001f1e6\U0001f1f4|\U0001f1e6\U0001f1f6|\U0001f1e6\U0001f1f7|\U0001f1e6\U0001f1f8|\U0001f1e6\U0001f1f9|\U0001f1e6\U0001f1fa|\U0001f1e6\U0001f1fc|\U0001f1e6\U0001f1fd|\U0001f1e6\U0001f1ff|\U0001f1e7\U0001f1e6|\U0001f1e7\U0001f1e7|\U0001f1e7\U0001f1e9|\U0001f1e7\U0001f1ea|\U0001f1e7\U0001f1eb|\U0001f1e7\U0001f1ec|\U0001f1e7\U0001f1ed|\U0001f1e7\U0001f1ee|\U0001f1e7\U0001f1ef|\U0001f1e7\U0001f1f1|\U0001f1e7\U0001f1f2|\U0001f1e7\U0001f1f3|\U0001f1e7\U0001f1f4|\U0001f1e7\U0001f1f6|\U0001f1e7\U0001f1f7|\U0001f1e7\U0001f1f8|\U0001f1e7\U0001f1f9|\U0001f1e7\U0001f1fb|\U0001f1e7\U0001f1fc|\U0001f1e7\U0001f1fe|\U0001f1e7\U0001f1ff|\U0001f1e8\U0001f1e6|\U0001f1e8\U0001f1e8|\U0001f1e8\U0001f1e9|\U0001f1e8\U0001f1eb|\U0001f1e8\U0001f1ec|\U0001f1e8\U0001f1ed|\U0001f1e8\U0001f1ee|\U0001f1e8\U0001f1f0|\U0001f1e8\U0001f1f1|\U0001f1e8\U0001f1f2|\U0001f1e8\U0001f1f3|\U0001f1e8\U0001f1f4|\U0001f1e8\U0001f1f5|\U0001f1e8\U0001f1f7|\U0001f1e8\U0001f1fa|\U0001f1e8\U0001f1fb|\U0001f1e8\U0001f1fc|\U0001f1e8\U0001f1fd|\U0001f1e8\U0001f1fe|\U0001f1e8\U0001f1ff|\U0001f1e9\U0001f1ea|\U0001f1e9\U0001f1ec|\U0001f1e9\U0001f1ef|\U0001f1e9\U0001f1f0|\U0001f1e9\U0001f1f2|\U0001f1e9\U0001f1f4|\U0001f1e9\U0001f1ff|\U0001f1ea\U0001f1e6|\U0001f1ea\U0001f1e8|\U0001f1ea\U0001f1ea|\U0001f1ea\U0001f1ec|\U0001f1ea\U0001f1ed|\U0001f1ea\U0001f1f7|\U0001f1ea\U0001f1f8|\U0001f1ea\U0001f1f9|\U0001f1ea\U0001f1fa|\U0001f1eb\U0001f1ee|\U0001f1eb\U0001f1ef|\U0001f1eb\U0001f1f0|\U0001f1eb\U0001f1f2|\U0001f1eb\U0001f1f4|\U0001f1eb\U0001f1f7|\U0001f1ec\U0001f1e6|\U0001f1ec\U0001f1e7|\U0001f1ec\U0001f1e9|\U0001f1ec\U0001f1ea|\U0001f1ec\U0001f1eb|\U0001f1ec\U0001f1ec|\U0001f1ec\U0001f1ed|\U0001f1ec\U0001f1ee|\U0001f1ec\U0001f1f1|\U0001f1ec\U0001f1f2|\U0001f1ec\U0001f1f3|\U0001f1ec\U0001f1f5|\U0001f1ec\U0001f1f6|\U0001f1ec\U0001f1f7|\U0001f1ec\U0001f1f8|\U0001f1ec\U0001f1f9|\U0001f1ec\U0001f1fa|\U0001f1ec\U0001f1fc|\U0001f1ec\U0001f1f
e|\U0001f1ed\U0001f1f0|\U0001f1ed\U0001f1f2|\U0001f1ed\U0001f1f3|\U0001f1ed\U0001f1f7|\U0001f1ed\U0001f1f9|\U0001f1ed\U0001f1fa|\U0001f1ee\U0001f1e8|\U0001f1ee\U0001f1e9|\U0001f1ee\U0001f1ea|\U0001f1ee\U0001f1f1|\U0001f1ee\U0001f1f2|\U0001f1ee\U0001f1f3|\U0001f1ee\U0001f1f4|\U0001f1ee\U0001f1f6|\U0001f1ee\U0001f1f7|\U0001f1ee\U0001f1f8|\U0001f1ee\U0001f1f9|\U0001f1ef\U0001f1ea|\U0001f1ef\U0001f1f2|\U0001f1ef\U0001f1f4|\U0001f1ef\U0001f1f5|\U0001f1f0\U0001f1ea|\U0001f1f0\U0001f1ec|\U0001f1f0\U0001f1ed|\U0001f1f0\U0001f1ee|\U0001f1f0\U0001f1f2|\U0001f1f0\U0001f1f3|\U0001f1f0\U0001f1f5|\U0001f1f0\U0001f1f7|\U0001f1f0\U0001f1fc|\U0001f1f0\U0001f1fe|\U0001f1f0\U0001f1ff|\U0001f1f1\U0001f1e6|\U0001f1f1\U0001f1e7|\U0001f1f1\U0001f1e8|\U0001f1f1\U0001f1ee|\U0001f1f1\U0001f1f0|\U0001f1f1\U0001f1f7|\U0001f1f1\U0001f1f8|\U0001f1f1\U0001f1f9|\U0001f1f1\U0001f1fa|\U0001f1f1\U0001f1fb|\U0001f1f1\U0001f1fe|\U0001f1f2\U0001f1e6|\U0001f1f2\U0001f1e8|\U0001f1f2\U0001f1e9|\U0001f1f2\U0001f1ea|\U0001f1f2\U0001f1eb|\U0001f1f2\U0001f1ec|\U0001f1f2\U0001f1ed|\U0001f1f2\U0001f1f0|\U0001f1f2\U0001f1f1|\U0001f1f2\U0001f1f2|\U0001f1f2\U0001f1f3|\U0001f1f2\U0001f1f4|\U0001f1f2\U0001f1f5|\U0001f1f2\U0001f1f6|\U0001f1f2\U0001f1f7|\U0001f1f2\U0001f1f8|\U0001f1f2\U0001f1f9|\U0001f1f2\U0001f1fa|\U0001f1f2\U0001f1fb|\U0001f1f2\U0001f1fc|\U0001f1f2\U0001f1fd|\U0001f1f2\U0001f1fe|\U0001f1f2\U0001f1ff|\U0001f1f3\U0001f1e6|\U0001f1f3\U0001f1e8|\U0001f1f3\U0001f1ea|\U0001f1f3\U0001f1eb|\U0001f1f3\U0001f1ec|\U0001f1f3\U0001f1ee|\U0001f1f3\U0001f1f1|\U0001f1f3\U0001f1f4|\U0001f1f3\U0001f1f5|\U0001f1f3\U0001f1f7|\U0001f1f3\U0001f1fa|\U0001f1f3\U0001f1ff|\U0001f1f4\U0001f1f2|\U0001f1f5\U0001f1e6|\U0001f1f5\U0001f1ea|\U0001f1f5\U0001f1eb|\U0001f1f5\U0001f1ec|\U0001f1f5\U0001f1ed|\U0001f1f5\U0001f1f0|\U0001f1f5\U0001f1f1|\U0001f1f5\U0001f1f2|\U0001f1f5\U0001f1f3|\U0001f1f5\U0001f1f7|\U0001f1f5\U0001f1f8|\U0001f1f5\U0001f1f9|\U0001f1f5\U0001f1fc|\U0001f1f5\U0001f1fe|\U0001f1f6\U0001f1e6|\U0001f1f7\U0001f1ea|\U0001f1f7\U0001f1f4|\U0001f1f7\U0001f1f8|\U0001f1f7\U0001f1fa|\U0001f1f7\U0001f1fc|\U0001f1f8\U0001f1e6|\U0001f1f8\U0001f1e7|\U0001f1f8\U0001f1e8|\U0001f1f8\U0001f1e9|\U0001f1f8\U0001f1ea|\U0001f1f8\U0001f1ec|\U0001f1f8\U0001f1ed|\U0001f1f8\U0001f1ee|\U0001f1f8\U0001f1ef|\U0001f1f8\U0001f1f0|\U0001f1f8\U0001f1f1|\U0001f1f8\U0001f1f2|\U0001f1f8\U0001f1f3|\U0001f1f8\U0001f1f4|\U0001f1f8\U0001f1f7|\U0001f1f8\U0001f1f8|\U0001f1f8\U0001f1f9|\U0001f1f8\U0001f1fb|\U0001f1f8\U0001f1fd|\U0001f1f8\U0001f1fe|\U0001f1f8\U0001f1ff|\U0001f1f9\U0001f1e6|\U0001f1f9\U0001f1e8|\U0001f1f9\U0001f1e9|\U0001f1f9\U0001f1eb|\U0001f1f9\U0001f1ec|\U0001f1f9\U0001f1ed|\U0001f1f9\U0001f1ef|\U0001f1f9\U0001f1f0|\U0001f1f9\U0001f1f1|\U0001f1f9\U0001f1f2|\U0001f1f9\U0001f1f3|\U0001f1f9\U0001f1f4|\U0001f1f9\U0001f1f7|\U0001f1f9\U0001f1f9|\U0001f1f9\U0001f1fb|\U0001f1f9\U0001f1fc|\U0001f1f9\U0001f1ff|\U0001f1fa\U0001f1e6|\U0001f1fa\U0001f1ec|\U0001f1fa\U0001f1f2|\U0001f1fa\U0001f1f3|\U0001f1fa\U0001f1f8|\U0001f1fa\U0001f1fe|\U0001f1fa\U0001f1ff|\U0001f1fb\U0001f1e6|\U0001f1fb\U0001f1e8|\U0001f1fb\U0001f1ea|\U0001f1fb\U0001f1ec|\U0001f1fb\U0001f1ee|\U0001f1fb\U0001f1f3|\U0001f1fb\U0001f1fa|\U0001f1fc\U0001f1eb|\U0001f1fc\U0001f1f8|\U0001f1fd\U0001f1f0|\U0001f1fe\U0001f1ea|\U0001f1fe\U0001f1f9|\U0001f1ff\U0001f1e6|\U0001f1ff\U0001f1f2|\U0001f1ff\U0001f1fc|\U0001f600|\U0001f603|\U0001f604|\U0001f601|\U0001f606|\U0001f605|\U0001f923|\U0001f602|\U0001f642|\U0001f643|\U0001f609|\U0001f60a|\U0001f607|\U0001f970|\U0001f60d|\U0001f929|\U0001f618|\U0001f617|\u263a|\U0001f61
a|\U0001f619|\U0001f60b|\U0001f61b|\U0001f61c|\U0001f92a|\U0001f61d|\U0001f911|\U0001f917|\U0001f92d|\U0001f92b|\U0001f914|\U0001f910|\U0001f928|\U0001f610|\U0001f611|\U0001f636|\U0001f60f|\U0001f612|\U0001f644|\U0001f62c|\U0001f925|\U0001f60c|\U0001f614|\U0001f62a|\U0001f924|\U0001f634|\U0001f637|\U0001f912|\U0001f915|\U0001f922|\U0001f92e|\U0001f927|\U0001f975|\U0001f976|\U0001f974|\U0001f635|\U0001f92f|\U0001f920|\U0001f973|\U0001f60e|\U0001f913|\U0001f9d0|\U0001f615|\U0001f61f|\U0001f641|\u2639|\U0001f62e|\U0001f62f|\U0001f632|\U0001f633|\U0001f97a|\U0001f626|\U0001f627|\U0001f628|\U0001f630|\U0001f625|\U0001f622|\U0001f62d|\U0001f631|\U0001f616|\U0001f623|\U0001f61e|\U0001f613|\U0001f629|\U0001f62b|\U0001f971|\U0001f624|\U0001f621|\U0001f620|\U0001f92c|\U0001f608|\U0001f47f|\U0001f480|\u2620|\U0001f4a9|\U0001f921|\U0001f479|\U0001f47a|\U0001f47b|\U0001f47d|\U0001f47e|\U0001f916|\U0001f63a|\U0001f638|\U0001f639|\U0001f63b|\U0001f63c|\U0001f63d|\U0001f640|\U0001f63f|\U0001f63e|\U0001f648|\U0001f649|\U0001f64a|\U0001f48b|\U0001f48c|\U0001f498|\U0001f49d|\U0001f496|\U0001f497|\U0001f493|\U0001f49e|\U0001f495|\U0001f49f|\u2763|\U0001f494|\u2764|\U0001f9e1|\U0001f49b|\U0001f49a|\U0001f499|\U0001f49c|\U0001f90e|\U0001f5a4|\U0001f90d|\U0001f4af|\U0001f4a2|\U0001f4a5|\U0001f4ab|\U0001f4a6|\U0001f4a8|\U0001f573|\U0001f4a3|\U0001f4ac|\U0001f5e8|\U0001f5ef|\U0001f4ad|\U0001f4a4|\U0001f44b|\U0001f91a|\U0001f590|\u270b|\U0001f596|\U0001f44c|\U0001f90f|\u270c|\U0001f91e|\U0001f91f|\U0001f918|\U0001f919|\U0001f448|\U0001f449|\U0001f446|\U0001f595|\U0001f447|\u261d|\U0001f44d|\U0001f44e|\u270a|\U0001f44a|\U0001f91b|\U0001f91c|\U0001f44f|\U0001f64c|\U0001f450|\U0001f932|\U0001f91d|\U0001f64f|\u270d|\U0001f485|\U0001f933|\U0001f4aa|\U0001f9be|\U0001f9bf|\U0001f9b5|\U0001f9b6|\U0001f442|\U0001f9bb|\U0001f443|\U0001f9e0|\U0001f9b7|\U0001f9b4|\U0001f440|\U0001f441|\U0001f445|\U0001f444|\U0001f476|\U0001f9d2|\U0001f466|\U0001f467|\U0001f9d1|\U0001f471|\U0001f468|\U0001f9d4|\U0001f469|\U0001f9d3|\U0001f474|\U0001f475|\U0001f64d|\U0001f64e|\U0001f645|\U0001f646|\U0001f481|\U0001f64b|\U0001f9cf|\U0001f647|\U0001f926|\U0001f937|\U0001f46e|\U0001f575|\U0001f482|\U0001f477|\U0001f934|\U0001f478|\U0001f473|\U0001f472|\U0001f9d5|\U0001f935|\U0001f470|\U0001f930|\U0001f931|\U0001f47c|\U0001f385|\U0001f936|\U0001f9b8|\U0001f9b9|\U0001f9d9|\U0001f9da|\U0001f9db|\U0001f9dc|\U0001f9dd|\U0001f9de|\U0001f9df|\U0001f486|\U0001f487|\U0001f6b6|\U0001f9cd|\U0001f9ce|\U0001f3c3|\U0001f483|\U0001f57a|\U0001f574|\U0001f46f|\U0001f9d6|\U0001f9d7|\U0001f93a|\U0001f3c7|\u26f7|\U0001f3c2|\U0001f3cc|\U0001f3c4|\U0001f6a3|\U0001f3ca|\u26f9|\U0001f3cb|\U0001f6b4|\U0001f6b5|\U0001f938|\U0001f93c|\U0001f93d|\U0001f93e|\U0001f939|\U0001f9d8|\U0001f6c0|\U0001f6cc|\U0001f46d|\U0001f46b|\U0001f46c|\U0001f48f|\U0001f491|\U0001f46a|\U0001f5e3|\U0001f464|\U0001f465|\U0001f463|\U0001f3fb|\U0001f3fc|\U0001f3fd|\U0001f3fe|\U0001f3ff|\U0001f9b0|\U0001f9b1|\U0001f9b3|\U0001f9b2|\U0001f435|\U0001f412|\U0001f98d|\U0001f9a7|\U0001f436|\U0001f415|\U0001f9ae|\U0001f429|\U0001f43a|\U0001f98a|\U0001f99d|\U0001f431|\U0001f408|\U0001f981|\U0001f42f|\U0001f405|\U0001f406|\U0001f434|\U0001f40e|\U0001f984|\U0001f993|\U0001f98c|\U0001f42e|\U0001f402|\U0001f403|\U0001f404|\U0001f437|\U0001f416|\U0001f417|\U0001f43d|\U0001f40f|\U0001f411|\U0001f410|\U0001f42a|\U0001f42b|\U0001f999|\U0001f992|\U0001f418|\U0001f98f|\U0001f99b|\U0001f42d|\U0001f401|\U0001f400|\U0001f439|\U0001f430|\U0001f407|\U0001f43f|\U0001f994|\U0001f987|\U0001f43b|\U0001f428|\U0001f43c|\U0001f9a5|
\U0001f9a6|\U0001f9a8|\U0001f998|\U0001f9a1|\U0001f43e|\U0001f983|\U0001f414|\U0001f413|\U0001f423|\U0001f424|\U0001f425|\U0001f426|\U0001f427|\U0001f54a|\U0001f985|\U0001f986|\U0001f9a2|\U0001f989|\U0001f9a9|\U0001f99a|\U0001f99c|\U0001f438|\U0001f40a|\U0001f422|\U0001f98e|\U0001f40d|\U0001f432|\U0001f409|\U0001f995|\U0001f996|\U0001f433|\U0001f40b|\U0001f42c|\U0001f41f|\U0001f420|\U0001f421|\U0001f988|\U0001f419|\U0001f41a|\U0001f40c|\U0001f98b|\U0001f41b|\U0001f41c|\U0001f41d|\U0001f41e|\U0001f997|\U0001f577|\U0001f578|\U0001f982|\U0001f99f|\U0001f9a0|\U0001f490|\U0001f338|\U0001f4ae|\U0001f3f5|\U0001f339|\U0001f940|\U0001f33a|\U0001f33b|\U0001f33c|\U0001f337|\U0001f331|\U0001f332|\U0001f333|\U0001f334|\U0001f335|\U0001f33e|\U0001f33f|\u2618|\U0001f340|\U0001f341|\U0001f342|\U0001f343|\U0001f347|\U0001f348|\U0001f349|\U0001f34a|\U0001f34b|\U0001f34c|\U0001f34d|\U0001f96d|\U0001f34e|\U0001f34f|\U0001f350|\U0001f351|\U0001f352|\U0001f353|\U0001f95d|\U0001f345|\U0001f965|\U0001f951|\U0001f346|\U0001f954|\U0001f955|\U0001f33d|\U0001f336|\U0001f952|\U0001f96c|\U0001f966|\U0001f9c4|\U0001f9c5|\U0001f344|\U0001f95c|\U0001f330|\U0001f35e|\U0001f950|\U0001f956|\U0001f968|\U0001f96f|\U0001f95e|\U0001f9c7|\U0001f9c0|\U0001f356|\U0001f357|\U0001f969|\U0001f953|\U0001f354|\U0001f35f|\U0001f355|\U0001f32d|\U0001f96a|\U0001f32e|\U0001f32f|\U0001f959|\U0001f9c6|\U0001f95a|\U0001f373|\U0001f958|\U0001f372|\U0001f963|\U0001f957|\U0001f37f|\U0001f9c8|\U0001f9c2|\U0001f96b|\U0001f371|\U0001f358|\U0001f359|\U0001f35a|\U0001f35b|\U0001f35c|\U0001f35d|\U0001f360|\U0001f362|\U0001f363|\U0001f364|\U0001f365|\U0001f96e|\U0001f361|\U0001f95f|\U0001f960|\U0001f961|\U0001f980|\U0001f99e|\U0001f990|\U0001f991|\U0001f9aa|\U0001f366|\U0001f367|\U0001f368|\U0001f369|\U0001f36a|\U0001f382|\U0001f370|\U0001f9c1|\U0001f967|\U0001f36b|\U0001f36c|\U0001f36d|\U0001f36e|\U0001f36f|\U0001f37c|\U0001f95b|\u2615|\U0001f375|\U0001f376|\U0001f37e|\U0001f377|\U0001f378|\U0001f379|\U0001f37a|\U0001f37b|\U0001f942|\U0001f943|\U0001f964|\U0001f9c3|\U0001f9c9|\U0001f9ca|\U0001f962|\U0001f37d|\U0001f374|\U0001f944|\U0001f52a|\U0001f3fa|\U0001f30d|\U0001f30e|\U0001f30f|\U0001f310|\U0001f5fa|\U0001f5fe|\U0001f9ed|\U0001f3d4|\u26f0|\U0001f30b|\U0001f5fb|\U0001f3d5|\U0001f3d6|\U0001f3dc|\U0001f3dd|\U0001f3de|\U0001f3df|\U0001f3db|\U0001f3d7|\U0001f9f1|\U0001f3d8|\U0001f3da|\U0001f3e0|\U0001f3e1|\U0001f3e2|\U0001f3e3|\U0001f3e4|\U0001f3e5|\U0001f3e6|\U0001f3e8|\U0001f3e9|\U0001f3ea|\U0001f3eb|\U0001f3ec|\U0001f3ed|\U0001f3ef|\U0001f3f0|\U0001f492|\U0001f5fc|\U0001f5fd|\u26ea|\U0001f54c|\U0001f6d5|\U0001f54d|\u26e9|\U0001f54b|\u26f2|\u26fa|\U0001f301|\U0001f303|\U0001f3d9|\U0001f304|\U0001f305|\U0001f306|\U0001f307|\U0001f309|\u2668|\U0001f3a0|\U0001f3a1|\U0001f3a2|\U0001f488|\U0001f3aa|\U0001f682|\U0001f683|\U0001f684|\U0001f685|\U0001f686|\U0001f687|\U0001f688|\U0001f689|\U0001f68a|\U0001f69d|\U0001f69e|\U0001f68b|\U0001f68c|\U0001f68d|\U0001f68e|\U0001f690|\U0001f691|\U0001f692|\U0001f693|\U0001f694|\U0001f695|\U0001f696|\U0001f697|\U0001f698|\U0001f699|\U0001f69a|\U0001f69b|\U0001f69c|\U0001f3ce|\U0001f3cd|\U0001f6f5|\U0001f9bd|\U0001f9bc|\U0001f6fa|\U0001f6b2|\U0001f6f4|\U0001f6f9|\U0001f68f|\U0001f6e3|\U0001f6e4|\U0001f6e2|\u26fd|\U0001f6a8|\U0001f6a5|\U0001f6a6|\U0001f6d1|\U0001f6a7|\u2693|\u26f5|\U0001f6f6|\U0001f6a4|\U0001f6f3|\u26f4|\U0001f6e5|\U0001f6a2|\u2708|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001fa82|\U0001f4ba|\U0001f681|\U0001f69f|\U0001f6a0|\U0001f6a1|\U0001f6f0|\U0001f680|\U0001f6f8|\U0001f6ce|\U0001f9f3|\u231b|\u23f3|\u231a|
\u23f0|\u23f1|\u23f2|\U0001f570|\U0001f55b|\U0001f567|\U0001f550|\U0001f55c|\U0001f551|\U0001f55d|\U0001f552|\U0001f55e|\U0001f553|\U0001f55f|\U0001f554|\U0001f560|\U0001f555|\U0001f561|\U0001f556|\U0001f562|\U0001f557|\U0001f563|\U0001f558|\U0001f564|\U0001f559|\U0001f565|\U0001f55a|\U0001f566|\U0001f311|\U0001f312|\U0001f313|\U0001f314|\U0001f315|\U0001f316|\U0001f317|\U0001f318|\U0001f319|\U0001f31a|\U0001f31b|\U0001f31c|\U0001f321|\u2600|\U0001f31d|\U0001f31e|\U0001fa90|\u2b50|\U0001f31f|\U0001f320|\U0001f30c|\u2601|\u26c5|\u26c8|\U0001f324|\U0001f325|\U0001f326|\U0001f327|\U0001f328|\U0001f329|\U0001f32a|\U0001f32b|\U0001f32c|\U0001f300|\U0001f308|\U0001f302|\u2602|\u2614|\u26f1|\u26a1|\u2744|\u2603|\u26c4|\u2604|\U0001f525|\U0001f4a7|\U0001f30a|\U0001f383|\U0001f384|\U0001f386|\U0001f387|\U0001f9e8|\u2728|\U0001f388|\U0001f389|\U0001f38a|\U0001f38b|\U0001f38d|\U0001f38e|\U0001f38f|\U0001f390|\U0001f391|\U0001f9e7|\U0001f380|\U0001f381|\U0001f397|\U0001f39f|\U0001f3ab|\U0001f396|\U0001f3c6|\U0001f3c5|\U0001f947|\U0001f948|\U0001f949|\u26bd|\u26be|\U0001f94e|\U0001f3c0|\U0001f3d0|\U0001f3c8|\U0001f3c9|\U0001f3be|\U0001f94f|\U0001f3b3|\U0001f3cf|\U0001f3d1|\U0001f3d2|\U0001f94d|\U0001f3d3|\U0001f3f8|\U0001f94a|\U0001f94b|\U0001f945|\u26f3|\u26f8|\U0001f3a3|\U0001f93f|\U0001f3bd|\U0001f3bf|\U0001f6f7|\U0001f94c|\U0001f3af|\U0001fa80|\U0001fa81|\U0001f3b1|\U0001f52e|\U0001f9ff|\U0001f3ae|\U0001f579|\U0001f3b0|\U0001f3b2|\U0001f9e9|\U0001f9f8|\u2660|\u2665|\u2666|\u2663|\u265f|\U0001f0cf|\U0001f004|\U0001f3b4|\U0001f3ad|\U0001f5bc|\U0001f3a8|\U0001f9f5|\U0001f9f6|\U0001f453|\U0001f576|\U0001f97d|\U0001f97c|\U0001f9ba|\U0001f454|\U0001f455|\U0001f456|\U0001f9e3|\U0001f9e4|\U0001f9e5|\U0001f9e6|\U0001f457|\U0001f458|\U0001f97b|\U0001fa71|\U0001fa72|\U0001fa73|\U0001f459|\U0001f45a|\U0001f45b|\U0001f45c|\U0001f45d|\U0001f6cd|\U0001f392|\U0001f45e|\U0001f45f|\U0001f97e|\U0001f97f|\U0001f460|\U0001f461|\U0001fa70|\U0001f462|\U0001f451|\U0001f452|\U0001f3a9|\U0001f393|\U0001f9e2|\u26d1|\U0001f4ff|\U0001f484|\U0001f48d|\U0001f48e|\U0001f507|\U0001f508|\U0001f509|\U0001f50a|\U0001f4e2|\U0001f4e3|\U0001f4ef|\U0001f514|\U0001f515|\U0001f3bc|\U0001f3b5|\U0001f3b6|\U0001f399|\U0001f39a|\U0001f39b|\U0001f3a4|\U0001f3a7|\U0001f4fb|\U0001f3b7|\U0001f3b8|\U0001f3b9|\U0001f3ba|\U0001f3bb|\U0001fa95|\U0001f941|\U0001f4f1|\U0001f4f2|\u260e|\U0001f4de|\U0001f4df|\U0001f4e0|\U0001f50b|\U0001f50c|\U0001f4bb|\U0001f5a5|\U0001f5a8|\u2328|\U0001f5b1|\U0001f5b2|\U0001f4bd|\U0001f4be|\U0001f4bf|\U0001f4c0|\U0001f9ee|\U0001f3a5|\U0001f39e|\U0001f4fd|\U0001f3ac|\U0001f4fa|\U0001f4f7|\U0001f4f8|\U0001f4f9|\U0001f4fc|\U0001f50d|\U0001f50e|\U0001f56f|\U0001f4a1|\U0001f526|\U0001f3ee|\U0001fa94|\U0001f4d4|\U0001f4d5|\U0001f4d6|\U0001f4d7|\U0001f4d8|\U0001f4d9|\U0001f4da|\U0001f4d3|\U0001f4d2|\U0001f4c3|\U0001f4dc|\U0001f4c4|\U0001f4f0|\U0001f5de|\U0001f4d1|\U0001f516|\U0001f3f7|\U0001f4b0|\U0001f4b4|\U0001f4b5|\U0001f4b6|\U0001f4b7|\U0001f4b8|\U0001f4b3|\U0001f9fe|\U0001f4b9|\U0001f4b1|\U0001f4b2|\u2709|\U0001f4e7|\U0001f4e8|\U0001f4e9|\U0001f4e4|\U0001f4e5|\U0001f4e6|\U0001f4eb|\U0001f4ea|\U0001f4ec|\U0001f4ed|\U0001f4ee|\U0001f5f3|\u270f|\u2712|\U0001f58b|\U0001f58a|\U0001f58c|\U0001f58d|\U0001f4dd|\U0001f4bc|\U0001f4c1|\U0001f4c2|\U0001f5c2|\U0001f4c5|\U0001f4c6|\U0001f5d2|\U0001f5d3|\U0001f4c7|\U0001f4c8|\U0001f4c9|\U0001f4ca|\U0001f4cb|\U0001f4cc|\U0001f4cd|\U0001f4ce|\U0001f587|\U0001f4cf|\U0001f4d0|\u2702|\U0001f5c3|\U0001f5c4|\U0001f5d1|\U0001f512|\U0001f513|\U0001f50f|\U0001f510|\U0001f511|\U0001f5dd|\U0001f528|\U
0001fa93|\u26cf|\u2692|\U0001f6e0|\U0001f5e1|\u2694|\U0001f52b|\U0001f3f9|\U0001f6e1|\U0001f527|\U0001f529|\u2699|\U0001f5dc|\u2696|\U0001f9af|\U0001f517|\u26d3|\U0001f9f0|\U0001f9f2|\u2697|\U0001f9ea|\U0001f9eb|\U0001f9ec|\U0001f52c|\U0001f52d|\U0001f4e1|\U0001f489|\U0001fa78|\U0001f48a|\U0001fa79|\U0001fa7a|\U0001f6aa|\U0001f6cf|\U0001f6cb|\U0001fa91|\U0001f6bd|\U0001f6bf|\U0001f6c1|\U0001fa92|\U0001f9f4|\U0001f9f7|\U0001f9f9|\U0001f9fa|\U0001f9fb|\U0001f9fc|\U0001f9fd|\U0001f9ef|\U0001f6d2|\U0001f6ac|\u26b0|\u26b1|\U0001f5ff|\U0001f3e7|\U0001f6ae|\U0001f6b0|\u267f|\U0001f6b9|\U0001f6ba|\U0001f6bb|\U0001f6bc|\U0001f6be|\U0001f6c2|\U0001f6c3|\U0001f6c4|\U0001f6c5|\u26a0|\U0001f6b8|\u26d4|\U0001f6ab|\U0001f6b3|\U0001f6ad|\U0001f6af|\U0001f6b1|\U0001f6b7|\U0001f4f5|\U0001f51e|\u2622|\u2623|\u2b06|\u2197|\u27a1|\u2198|\u2b07|\u2199|\u2b05|\u2196|\u2195|\u2194|\u21a9|\u21aa|\u2934|\u2935|\U0001f503|\U0001f504|\U0001f519|\U0001f51a|\U0001f51b|\U0001f51c|\U0001f51d|\U0001f6d0|\u269b|\U0001f549|\u2721|\u2638|\u262f|\u271d|\u2626|\u262a|\u262e|\U0001f54e|\U0001f52f|\u2648|\u2649|\u264a|\u264b|\u264c|\u264d|\u264e|\u264f|\u2650|\u2651|\u2652|\u2653|\u26ce|\U0001f500|\U0001f501|\U0001f502|\u25b6|\u23e9|\u23ed|\u23ef|\u25c0|\u23ea|\u23ee|\U0001f53c|\u23eb|\U0001f53d|\u23ec|\u23f8|\u23f9|\u23fa|\u23cf|\U0001f3a6|\U0001f505|\U0001f506|\U0001f4f6|\U0001f4f3|\U0001f4f4|\u2640|\u2642|\u2695|\u267e|\u267b|\u269c|\U0001f531|\U0001f4db|\U0001f530|\u2b55|\u2705|\u2611|\u2714|\u2716|\u274c|\u274e|\u2795|\u2796|\u2797|\u27b0|\u27bf|\u303d|\u2733|\u2734|\u2747|\u203c|\u2049|\u2753|\u2754|\u2755|\u2757|\u3030|\xa9|\xae|\u2122|\U0001f51f|\U0001f520|\U0001f521|\U0001f522|\U0001f523|\U0001f524|\U0001f170|\U0001f18e|\U0001f171|\U0001f191|\U0001f192|\U0001f193|\u2139|\U0001f194|\u24c2|\U0001f195|\U0001f196|\U0001f17e|\U0001f197|\U0001f17f|\U0001f198|\U0001f199|\U0001f19a|\U0001f201|\U0001f202|\U0001f237|\U0001f236|\U0001f22f|\U0001f250|\U0001f239|\U0001f21a|\U0001f232|\U0001f251|\U0001f238|\U0001f234|\U0001f233|\u3297|\u3299|\U0001f23a|\U0001f235|\U0001f534|\U0001f7e0|\U0001f7e1|\U0001f7e2|\U0001f535|\U0001f7e3|\U0001f7e4|\u26ab|\u26aa|\U0001f7e5|\U0001f7e7|\U0001f7e8|\U0001f7e9|\U0001f7e6|\U0001f7ea|\U0001f7eb|\u2b1b|\u2b1c|\u25fc|\u25fb|\u25fe|\u25fd|\u25aa|\u25ab|\U0001f536|\U0001f537|\U0001f538|\U0001f539|\U0001f53a|\U0001f53b|\U0001f4a0|\U0001f518|\U0001f533|\U0001f532|\U0001f3c1|\U0001f6a9|\U0001f38c|\U0001f3f4|\U0001f3f3')) -> str:
"""Return the string obtained by replacing all emojis in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all emojis in text with 'repl'
Reference:
akkez/emoji.py: Python emoji regexp / python emoji detection
https://gist.github.com/akkez/99ceeae2f13c9d8d9be7df0279e2c438
"""
text = regex.sub(repl, text)
return text
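# Usage sketch (illustrative; assumes the Normalizer class defined in this file):
# a standalone emoji codepoint such as U+1F44D is rewritten to the given token.
#   >>> Normalizer()._emoji_normalize("nice 👍", "[EMOJI]")
#   'nice [EMOJI]'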
def _email_normalize(self, text: str, repl: str, regex=re.compile(r'[a-zA-Z0-9.!#$%&\'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9.]+')) -> str:
"""Return the string obtained by replacing all email addresses in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all email addresses in text with 'repl'
"""
text = regex.sub(repl, text)
return text
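# Usage sketch (illustrative): the pattern covers dotted local parts and
# multi-label domains.
#   >>> Normalizer()._email_normalize("contact: jane.doe@example.com", "[EMAIL]")
#   'contact: [EMAIL]'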
def _tel_normalize(self, text: str, repl: str, regex=re.compile(r'[()+\d.\-]*[ ]?\d{2,4}[-. ]+\d{3,4}[-. ]+\d{3,4}')) -> str:
"""Return the string obtained by replacing all phone numbers in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all phone numbers in text with 'repl'
"""
text = regex.sub(repl, text)
return text | def normalize(self, text: str) -> str:
"""Normalize text.
Args:
text (str): text to be normalized
"""
for normalize_fn, repl in self._normalize:
text = normalize_fn(text, repl)
return text | 25 | 34 | import re
class Normalizer:
"""Normalizer return the text replaced with 'repl'.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
Args:
url_repl (str): replace all urls in text with this
tag_repl (str): replace all tags in text with this
emoji_repl (str): replace all emojis in text with this
email_repl (str): replace all emails in text with this
tel_repl (str): replace all tels in text with this
"""
def __init__(self, url_repl='[URL]', tag_repl='[TAG]', emoji_repl='[EMOJI]', email_repl='[EMAIL]', tel_repl='[TEL]'):
# replacement tokens; None disables the corresponding normalization
self.url_repl = url_repl
self.tag_repl = tag_repl
self.emoji_repl = emoji_repl
self.email_repl = email_repl
self.tel_repl = tel_repl
self._normalize = []
self._init_normalize()
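# Construction sketch (illustrative): passing None for any repl disables that
# normalization while the others stay active.
#   >>> quiet = Normalizer(tel_repl=None)  # phone numbers pass through unchanged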
def normalize(self, text: str) -> str:
"""Normalize text.
Args:
text (str): text to be normalized
"""
for normalize_fn, repl in self._normalize:
text = normalize_fn(text, repl)
return text
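# Pipeline sketch (illustrative): each registered (function, repl) pair runs in
# the order built by _init_normalize below, so one call rewrites several
# pattern types at once.
#   >>> Normalizer().normalize("see https://example.com or mail a@b.io")
#   'see [URL] or mail [EMAIL]'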
def _init_normalize(self) -> None:
"""Initialize normalize function.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
"""
if self.url_repl is not None:
self._normalize.append((self._url_normalize, self.url_repl))
if self.tag_repl is not None:
self._normalize.append((self._tag_normalize, self.tag_repl))
if self.emoji_repl is not None:
self._normalize.append((self._emoji_normalize, self.emoji_repl))
if self.email_repl is not None:
self._normalize.append((self._email_normalize, self.email_repl))
if self.tel_repl is not None:
self._normalize.append((self._tel_normalize, self.tel_repl))
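# With all defaults enabled, self._normalize ends up holding five
# (bound method, token) pairs, applied by normalize() in this order:
# url -> tag -> emoji -> email -> tel.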
def _url_normalize(self, text: str, repl: str, regex=re.compile(r'(https?|ftp|www)\S+')) -> str:
"""Return the string obtained by replacing all urls in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all urls in text with 'repl'
"""
text = regex.sub(repl, text)
return text
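# Usage sketch (illustrative): the pattern anchors on an http(s)/ftp scheme or
# a bare "www" and consumes up to the next whitespace.
#   >>> Normalizer()._url_normalize("docs at www.example.com/page", "[URL]")
#   'docs at [URL]'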
def _tag_normalize(self, text: str, repl: str, regex=re.compile(r'<[^>]*>')) -> str:
"""Return the string obtained by replacing all HTML tags in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all HTML tags in text with 'repl'
"""
text = regex.sub(repl, text)
return text
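# Usage sketch (illustrative): because the character class excludes '>', each
# angle-bracket run is matched separately, i.e. tag by tag.
#   >>> Normalizer()._tag_normalize("<b>bold</b>", "[TAG]")
#   '[TAG]bold[TAG]'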
def _emoji_normalize(self, text: str, repl: str, regex=re.compile(r'\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f469|\U0001f9d1\U0001f3fb\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f468\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3ff\u200d\U
0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f3f4\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f469|\U0001f441\ufe0f\u200d\U0001f5e8\ufe0f|\U0001f471\U0001f3fb\u200d\u2642\ufe0f|\U0001f471\U0001f3fc\u200d\u2642\ufe0f|\U0001f471\U0001f3fd\u200d\u2642\ufe0f|\U0001f471\U0001f3fe\u200d\u2642\ufe0f|\U0001f471\U0001f3ff\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2640\ufe0f|\U0001f471\U0001f3fc\u200d\u2640\ufe0f|\U0001f471\U0001f3fd\u200d\u2640\ufe0f|\U0001f471\U0001f3fe\u200d\u2640\ufe0f|\U0001f471\U0001f3ff\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642\ufe0f|\U0001f64d\U0001f3fc\u200d\u2642\ufe0f|\U0001f64d\U0001f3fd\u200d\u2642\ufe0f|\U0001f64d\U0001f3fe\u200d\u2642\ufe0f|\U0001f64d\U0001f3ff\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640\ufe0f|\U0001f64d\U0001f3fc\u200d\u2640\ufe0f|\U0001f64d\U0001f3fd\u200d\u2640\ufe0f|\U0001f64d\U0001f3fe\u200d\u2640\ufe0f|\U0001f64d\U0001f3ff\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642\ufe0f|\U0001f64e\U0001f3fc\u200d\u2642\ufe0f|\U0001f64e\U0001f3fd\u200d\u2642\ufe0f|\U0001f64e\U0001f3fe\u200d\u2642\ufe0f|\U0001f64e\U0001f3ff\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640\ufe0f|\U0001f64e\U0001f3fc\u200d\u2640\ufe0f|\U0001f64e\U0001f3fd\u200d\u2640\ufe0f|\U0001f64e\U0001f3fe\u200d\u2640\ufe0f|\U0001f64e\U0001f3ff\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2642\ufe0f|\U0001f645\U0001f3fc\u200d\u2642\ufe0f|\U0001f645\U0001f3fd\u200d\u2642\ufe0f|\U0001f645\U0001f3fe\u200d\u2642\ufe0f|\U0001f645\U0001f3ff\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2640\ufe0f|\U0001f645\U0001f3fc\u200d\u2640\ufe0f|\U0001f645\U0001f3fd\u200d\u2640\ufe0f|\U0001f645\U0001f3fe\u200d\u2640\ufe0f|\U0001f645\U0001f3ff\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2642\ufe0f|\U0001f646\U0001f3fc\u200d\u2642\ufe0f|\U0001f646\U0001f3fd\u200d\u2642\ufe0f|\U0001f646\U0001f3fe\u200d\u2642\ufe0f|\U0001f646\U0001f3ff\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2640\ufe0f|\U0001f646\U0001f3fc\u200d\u2640\ufe0f|\U0001f646\U0001f3fd\u200d\u2640\ufe0f|\U0001f646\U0001f3fe\u200d\u2640\ufe0f|\U0001f646\U0001f3ff\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2642\ufe0f|\U0001f481\U0001f3fc\u200d\u2642\ufe0f|\U0001f481\U0001f3fd\u200d\u2642\ufe0f|\U0001f481\U0001f3fe\u200d\u2642\ufe0f|\U0001f481\U0001f3ff\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2640\ufe0f|\U0001f481\U0001f3fc\u200d\u2640\ufe0f|\U0001f481\U0001f3fd\u200d\u2640\ufe0f|\U0001f481\U0001f3fe\u200d\u2640\ufe0f|\U0001f481
\U0001f3ff\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642\ufe0f|\U0001f64b\U0001f3fc\u200d\u2642\ufe0f|\U0001f64b\U0001f3fd\u200d\u2642\ufe0f|\U0001f64b\U0001f3fe\u200d\u2642\ufe0f|\U0001f64b\U0001f3ff\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640\ufe0f|\U0001f64b\U0001f3fc\u200d\u2640\ufe0f|\U0001f64b\U0001f3fd\u200d\u2640\ufe0f|\U0001f64b\U0001f3fe\u200d\u2640\ufe0f|\U0001f64b\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2642\ufe0f|\U0001f647\U0001f3fc\u200d\u2642\ufe0f|\U0001f647\U0001f3fd\u200d\u2642\ufe0f|\U0001f647\U0001f3fe\u200d\u2642\ufe0f|\U0001f647\U0001f3ff\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2640\ufe0f|\U0001f647\U0001f3fc\u200d\u2640\ufe0f|\U0001f647\U0001f3fd\u200d\u2640\ufe0f|\U0001f647\U0001f3fe\u200d\u2640\ufe0f|\U0001f647\U0001f3ff\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2642\ufe0f|\U0001f926\U0001f3fc\u200d\u2642\ufe0f|\U0001f926\U0001f3fd\u200d\u2642\ufe0f|\U0001f926\U0001f3fe\u200d\u2642\ufe0f|\U0001f926\U0001f3ff\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2640\ufe0f|\U0001f926\U0001f3fc\u200d\u2640\ufe0f|\U0001f926\U0001f3fd\u200d\u2640\ufe0f|\U0001f926\U0001f3fe\u200d\u2640\ufe0f|\U0001f926\U0001f3ff\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2642\ufe0f|\U0001f937\U0001f3fc\u200d\u2642\ufe0f|\U0001f937\U0001f3fd\u200d\u2642\ufe0f|\U0001f937\U0001f3fe\u200d\u2642\ufe0f|\U0001f937\U0001f3ff\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2640\ufe0f|\U0001f937\U0001f3fc\u200d\u2640\ufe0f|\U0001f937\U0001f3fd\u200d\u2640\ufe0f|\U0001f937\U0001f3fe\u200d\u2640\ufe0f|\U0001f937\U0001f3ff\u200d\u2640\ufe0f|\U0001f468\U0001f3fb\u200d\u2695\ufe0f|\U0001f468\U0001f3fc\u200d\u2695\ufe0f|\U0001f468\U0001f3fd\u200d\u2695\ufe0f|\U0001f468\U0001f3fe\u200d\u2695\ufe0f|\U0001f468\U0001f3ff\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695\ufe0f|\U0001f469\U0001f3fc\u200d\u2695\ufe0f|\U0001f469\U0001f3fd\u200d\u2695\ufe0f|\U0001f469\U0001f3fe\u200d\u2695\ufe0f|\U0001f469\U0001f3ff\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2696\ufe0f|\U0001f468\U0001f3fc\u200d\u2696\ufe0f|\U0001f468\U0001f3fd\u200d\u2696\ufe0f|\U0001f468\U0001f3fe\u200d\u2696\ufe0f|\U0001f468\U0001f3ff\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696\ufe0f|\U0001f469\U0001f3fc\u200d\u2696\ufe0f|\U0001f469\U0001f3fd\u200d\u2696\ufe0f|\U0001f469\U0001f3fe\u200d\u2696\ufe0f|\U0001f469\U0001f3ff\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2708\ufe0f|\U0001f468\U0001f3fc\u200d\u2708\ufe0f|\U0001f468\U0001f3fd\u200d\u2708\ufe0f|\U0001f468\U0001f3fe\u200d\u2708\ufe0f|\U0001f468\U0001f3ff\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708\ufe0f|\U0001f469\U0001f3fc\u200d\u2708\ufe0f|\U0001f469\U0001f3fd\u200d\u2708\ufe0f|\U0001f469\U0001f3fe\u200d\u2708\ufe0f|\U0001f469\U0001f3ff\u200d\u2708\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642\ufe0f|\U0001f46e\U0001f3fc\u200d\u2642\ufe0f|\U0001f46e\U0001f3fd\u200d\u2642\ufe0f|\U0001f46e\U0001f3fe\u200d\u2642\ufe0f|\U0001f46e\U0001f3ff\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640\ufe0f|\U0001f46e\U0001f3fc\u200d\u2640\ufe0f|\U0001f46e\U0001f3fd\u200d\u2640\ufe0f|\U0001f46e\U0001f3fe\u200d\u2640\ufe0f|\U0001f46e\U0001f3ff\u200d\u2640\ufe0f|\U0001f575\ufe0f
\u200d\u2642\ufe0f|\U0001f575\U0001f3fb\u200d\u2642\ufe0f|\U0001f575\U0001f3fc\u200d\u2642\ufe0f|\U0001f575\U0001f3fd\u200d\u2642\ufe0f|\U0001f575\U0001f3fe\u200d\u2642\ufe0f|\U0001f575\U0001f3ff\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2640\ufe0f|\U0001f575\U0001f3fb\u200d\u2640\ufe0f|\U0001f575\U0001f3fc\u200d\u2640\ufe0f|\U0001f575\U0001f3fd\u200d\u2640\ufe0f|\U0001f575\U0001f3fe\u200d\u2640\ufe0f|\U0001f575\U0001f3ff\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2642\ufe0f|\U0001f482\U0001f3fc\u200d\u2642\ufe0f|\U0001f482\U0001f3fd\u200d\u2642\ufe0f|\U0001f482\U0001f3fe\u200d\u2642\ufe0f|\U0001f482\U0001f3ff\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2640\ufe0f|\U0001f482\U0001f3fc\u200d\u2640\ufe0f|\U0001f482\U0001f3fd\u200d\u2640\ufe0f|\U0001f482\U0001f3fe\u200d\u2640\ufe0f|\U0001f482\U0001f3ff\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2642\ufe0f|\U0001f477\U0001f3fc\u200d\u2642\ufe0f|\U0001f477\U0001f3fd\u200d\u2642\ufe0f|\U0001f477\U0001f3fe\u200d\u2642\ufe0f|\U0001f477\U0001f3ff\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2640\ufe0f|\U0001f477\U0001f3fc\u200d\u2640\ufe0f|\U0001f477\U0001f3fd\u200d\u2640\ufe0f|\U0001f477\U0001f3fe\u200d\u2640\ufe0f|\U0001f477\U0001f3ff\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2642\ufe0f|\U0001f473\U0001f3fc\u200d\u2642\ufe0f|\U0001f473\U0001f3fd\u200d\u2642\ufe0f|\U0001f473\U0001f3fe\u200d\u2642\ufe0f|\U0001f473\U0001f3ff\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2640\ufe0f|\U0001f473\U0001f3fc\u200d\u2640\ufe0f|\U0001f473\U0001f3fd\u200d\u2640\ufe0f|\U0001f473\U0001f3fe\u200d\u2640\ufe0f|\U0001f473\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642\ufe0f|\U0001f9da\U0001f3fc\u200d\u2642\ufe0f|\U0001f9da\U0001f3fd\u200d\u2642\ufe0f|\U0001f9da\U0001f3fe\u200d\u2642\ufe0f|\U0001f9da\U0001f3ff\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640\ufe0f|\U0001f9da\U0001f3fc\u200d\u2640\ufe0f|\U0001f9da\U0001f3fd\u200d\u2640\ufe0f|\U0001f9da\U0001f3fe\u200d\u2640\ufe0f|\U0001f9da\U0001f3ff\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642\ufe0f|\U0001f9db\U0001f3fc\u200d\u2642\ufe0f|\U0001f9db\U0001f3fd\u200d\u2642\ufe0f|\U0001f9db\U0001f3fe\u200d\u2642\ufe0f|\U0001f9db\U0001f3ff\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640\ufe0f|\U0001f9db\U0001f3fc\u200d\u2640\ufe0f|\U0001f9db\U0001f3fd\u200d\u2640\ufe0f|\U0001f9db\U0001f3fe\u200d\u2640\ufe0f|\U0001f9db\U0001f3ff\u200d\u26
40\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2642\ufe0f|\U0001f486\U0001f3fc\u200d\u2642\ufe0f|\U0001f486\U0001f3fd\u200d\u2642\ufe0f|\U0001f486\U0001f3fe\u200d\u2642\ufe0f|\U0001f486\U0001f3ff\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2640\ufe0f|\U0001f486\U0001f3fc\u200d\u2640\ufe0f|\U0001f486\U0001f3fd\u200d\u2640\ufe0f|\U0001f486\U0001f3fe\u200d\u2640\ufe0f|\U0001f486\U0001f3ff\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2642\ufe0f|\U0001f487\U0001f3fc\u200d\u2642\ufe0f|\U0001f487\U0001f3fd\u200d\u2642\ufe0f|\U0001f487\U0001f3fe\u200d\u2642\ufe0f|\U0001f487\U0001f3ff\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2640\ufe0f|\U0001f487\U0001f3fc\u200d\u2640\ufe0f|\U0001f487\U0001f3fd\u200d\u2640\ufe0f|\U0001f487\U0001f3fe\u200d\u2640\ufe0f|\U0001f487\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2642\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2640\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642\ufe
0f|\U0001f9d7\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2642\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2640\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2642\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2640\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2642\ufe0f|\u26f9\U0001f3fb\u200d\u2642\ufe0f|\u26f9\U0001f3fc\u200d\u2642\ufe0f|\u26f9\U0001f3fd\u200d\u2642\ufe0f|\u26f9\U0001f3fe\u200d\u2642\ufe0f|\u26f9\U0001f3ff\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2640\ufe0f|\u26f9\U0001f3fb\u200d\u2640\ufe0f|\u26f9\U0001f3fc\u200d\u2640\ufe0f|\u26f9\U0001f3fd\u200d\u2640\ufe0f|\u26f9\U0001f3fe\u200d\u2640\ufe0f|\u26f9\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2640\ufe
0f|\U0001f6b5\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2642\ufe0f|\U0001f938\U0001f3fc\u200d\u2642\ufe0f|\U0001f938\U0001f3fd\u200d\u2642\ufe0f|\U0001f938\U0001f3fe\u200d\u2642\ufe0f|\U0001f938\U0001f3ff\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2640\ufe0f|\U0001f938\U0001f3fc\u200d\u2640\ufe0f|\U0001f938\U0001f3fd\u200d\u2640\ufe0f|\U0001f938\U0001f3fe\u200d\u2640\ufe0f|\U0001f938\U0001f3ff\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642\ufe0f|\U0001f93d\U0001f3fc\u200d\u2642\ufe0f|\U0001f93d\U0001f3fd\u200d\u2642\ufe0f|\U0001f93d\U0001f3fe\u200d\u2642\ufe0f|\U0001f93d\U0001f3ff\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640\ufe0f|\U0001f93d\U0001f3fc\u200d\u2640\ufe0f|\U0001f93d\U0001f3fd\u200d\u2640\ufe0f|\U0001f93d\U0001f3fe\u200d\u2640\ufe0f|\U0001f93d\U0001f3ff\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642\ufe0f|\U0001f93e\U0001f3fc\u200d\u2642\ufe0f|\U0001f93e\U0001f3fd\u200d\u2642\ufe0f|\U0001f93e\U0001f3fe\u200d\u2642\ufe0f|\U0001f93e\U0001f3ff\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640\ufe0f|\U0001f93e\U0001f3fc\u200d\u2640\ufe0f|\U0001f93e\U0001f3fd\u200d\u2640\ufe0f|\U0001f93e\U0001f3fe\u200d\u2640\ufe0f|\U0001f93e\U0001f3ff\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2642\ufe0f|\U0001f939\U0001f3fc\u200d\u2642\ufe0f|\U0001f939\U0001f3fd\u200d\u2642\ufe0f|\U0001f939\U0001f3fe\u200d\u2642\ufe0f|\U0001f939\U0001f3ff\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2640\ufe0f|\U0001f939\U0001f3fc\u200d\u2640\ufe0f|\U0001f939\U0001f3fd\u200d\u2640\ufe0f|\U0001f939\U0001f3fe\u200d\u2640\ufe0f|\U0001f939\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d1\u200d\U0001f91d\u200d\U0001f9d1|\U0001f469\u200d\u2764\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f441\u200d\U0001f5e8\ufe0f|\U0001f441\ufe0f\u200d\U0001f5e8|\U0001f471\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2642|\U0001f471\U0001f3fc\u200d\u2642|\U0001f471\U0001f3fd\u200d\u2642|\U0001f471\U0001f3fe\u200d\u2642|\U0001f471\U0001f3ff\u200d\u2642|\U0001f468\U0001f3fb\u200d\U0001f9b0|\U0001f468\U0001f3fc\u200d\U0001f9b0|\U0001f468\U0001f3fd\u200d\U0001f9b0|\U0001f468\U0001f3fe\u200d\U0001f9b0|\U0001f468\U0001f3ff\u200d\U0001f9b0|\U0001f468\U0001f3fb\u200d\U0001f9b1|\U0001f468\U0001f3fc\u200d\U0001f9b1|\U0001f468\U0001f3fd\u200d\U0001f9b1|\U0001f468\U0001f3fe\u200d\U0001f9b1|\U0001f468\U0001f3ff\u200d\U0001f9b1|\U0001f468\U0001f3fb\u200d\U0001f9b3|\U0001f468\U0001f3fc\u200d\U0001f9b3|\U0001f468\U0001f3fd\u200d\U0001f9b3|\U0001f468\U0001f3fe\u200d\U0001f9b3|\U0001f468\U0001f3ff\u200d\U0001f9b3|\U0001f468\U0001f3fb\u200d\U0001f
9b2|\U0001f468\U0001f3fc\u200d\U0001f9b2|\U0001f468\U0001f3fd\u200d\U0001f9b2|\U0001f468\U0001f3fe\u200d\U0001f9b2|\U0001f468\U0001f3ff\u200d\U0001f9b2|\U0001f471\u200d\u2640\ufe0f|\U0001f471\U0001f3fb\u200d\u2640|\U0001f471\U0001f3fc\u200d\u2640|\U0001f471\U0001f3fd\u200d\u2640|\U0001f471\U0001f3fe\u200d\u2640|\U0001f471\U0001f3ff\u200d\u2640|\U0001f469\U0001f3fb\u200d\U0001f9b0|\U0001f469\U0001f3fc\u200d\U0001f9b0|\U0001f469\U0001f3fd\u200d\U0001f9b0|\U0001f469\U0001f3fe\u200d\U0001f9b0|\U0001f469\U0001f3ff\u200d\U0001f9b0|\U0001f469\U0001f3fb\u200d\U0001f9b1|\U0001f469\U0001f3fc\u200d\U0001f9b1|\U0001f469\U0001f3fd\u200d\U0001f9b1|\U0001f469\U0001f3fe\u200d\U0001f9b1|\U0001f469\U0001f3ff\u200d\U0001f9b1|\U0001f469\U0001f3fb\u200d\U0001f9b3|\U0001f469\U0001f3fc\u200d\U0001f9b3|\U0001f469\U0001f3fd\u200d\U0001f9b3|\U0001f469\U0001f3fe\u200d\U0001f9b3|\U0001f469\U0001f3ff\u200d\U0001f9b3|\U0001f469\U0001f3fb\u200d\U0001f9b2|\U0001f469\U0001f3fc\u200d\U0001f9b2|\U0001f469\U0001f3fd\u200d\U0001f9b2|\U0001f469\U0001f3fe\u200d\U0001f9b2|\U0001f469\U0001f3ff\u200d\U0001f9b2|\U0001f64d\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642|\U0001f64d\U0001f3fc\u200d\u2642|\U0001f64d\U0001f3fd\u200d\u2642|\U0001f64d\U0001f3fe\u200d\u2642|\U0001f64d\U0001f3ff\u200d\u2642|\U0001f64d\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640|\U0001f64d\U0001f3fc\u200d\u2640|\U0001f64d\U0001f3fd\u200d\u2640|\U0001f64d\U0001f3fe\u200d\u2640|\U0001f64d\U0001f3ff\u200d\u2640|\U0001f64e\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642|\U0001f64e\U0001f3fc\u200d\u2642|\U0001f64e\U0001f3fd\u200d\u2642|\U0001f64e\U0001f3fe\u200d\u2642|\U0001f64e\U0001f3ff\u200d\u2642|\U0001f64e\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640|\U0001f64e\U0001f3fc\u200d\u2640|\U0001f64e\U0001f3fd\u200d\u2640|\U0001f64e\U0001f3fe\u200d\u2640|\U0001f64e\U0001f3ff\u200d\u2640|\U0001f645\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2642|\U0001f645\U0001f3fc\u200d\u2642|\U0001f645\U0001f3fd\u200d\u2642|\U0001f645\U0001f3fe\u200d\u2642|\U0001f645\U0001f3ff\u200d\u2642|\U0001f645\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2640|\U0001f645\U0001f3fc\u200d\u2640|\U0001f645\U0001f3fd\u200d\u2640|\U0001f645\U0001f3fe\u200d\u2640|\U0001f645\U0001f3ff\u200d\u2640|\U0001f646\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2642|\U0001f646\U0001f3fc\u200d\u2642|\U0001f646\U0001f3fd\u200d\u2642|\U0001f646\U0001f3fe\u200d\u2642|\U0001f646\U0001f3ff\u200d\u2642|\U0001f646\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2640|\U0001f646\U0001f3fc\u200d\u2640|\U0001f646\U0001f3fd\u200d\u2640|\U0001f646\U0001f3fe\u200d\u2640|\U0001f646\U0001f3ff\u200d\u2640|\U0001f481\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2642|\U0001f481\U0001f3fc\u200d\u2642|\U0001f481\U0001f3fd\u200d\u2642|\U0001f481\U0001f3fe\u200d\u2642|\U0001f481\U0001f3ff\u200d\u2642|\U0001f481\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2640|\U0001f481\U0001f3fc\u200d\u2640|\U0001f481\U0001f3fd\u200d\u2640|\U0001f481\U0001f3fe\u200d\u2640|\U0001f481\U0001f3ff\u200d\u2640|\U0001f64b\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642|\U0001f64b\U0001f3fc\u200d\u2642|\U0001f64b\U0001f3fd\u200d\u2642|\U0001f64b\U0001f3fe\u200d\u2642|\U0001f64b\U0001f3ff\u200d\u2642|\U0001f64b\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640|\U0001f64b\U0001f3fc\u200d\u2640|\U0001f64b\U0001f3fd\u200d\u2640|\U0001f64b\U0001f3fe\u200d\u2640|\U0001f64b\U0001f3ff\u200d\u2640|\U0001f9cf\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642|\U0001f9cf\U0001f3fc\u200d\u2642|\U0001f9cf\U0001f3fd\u200d\u2642|\U0001f9cf\U0
001f3fe\u200d\u2642|\U0001f9cf\U0001f3ff\u200d\u2642|\U0001f9cf\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640|\U0001f9cf\U0001f3fc\u200d\u2640|\U0001f9cf\U0001f3fd\u200d\u2640|\U0001f9cf\U0001f3fe\u200d\u2640|\U0001f9cf\U0001f3ff\u200d\u2640|\U0001f647\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2642|\U0001f647\U0001f3fc\u200d\u2642|\U0001f647\U0001f3fd\u200d\u2642|\U0001f647\U0001f3fe\u200d\u2642|\U0001f647\U0001f3ff\u200d\u2642|\U0001f647\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2640|\U0001f647\U0001f3fc\u200d\u2640|\U0001f647\U0001f3fd\u200d\u2640|\U0001f647\U0001f3fe\u200d\u2640|\U0001f647\U0001f3ff\u200d\u2640|\U0001f926\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2642|\U0001f926\U0001f3fc\u200d\u2642|\U0001f926\U0001f3fd\u200d\u2642|\U0001f926\U0001f3fe\u200d\u2642|\U0001f926\U0001f3ff\u200d\u2642|\U0001f926\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2640|\U0001f926\U0001f3fc\u200d\u2640|\U0001f926\U0001f3fd\u200d\u2640|\U0001f926\U0001f3fe\u200d\u2640|\U0001f926\U0001f3ff\u200d\u2640|\U0001f937\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2642|\U0001f937\U0001f3fc\u200d\u2642|\U0001f937\U0001f3fd\u200d\u2642|\U0001f937\U0001f3fe\u200d\u2642|\U0001f937\U0001f3ff\u200d\u2642|\U0001f937\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2640|\U0001f937\U0001f3fc\u200d\u2640|\U0001f937\U0001f3fd\u200d\u2640|\U0001f937\U0001f3fe\u200d\u2640|\U0001f937\U0001f3ff\u200d\u2640|\U0001f468\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2695|\U0001f468\U0001f3fc\u200d\u2695|\U0001f468\U0001f3fd\u200d\u2695|\U0001f468\U0001f3fe\u200d\u2695|\U0001f468\U0001f3ff\u200d\u2695|\U0001f469\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695|\U0001f469\U0001f3fc\u200d\u2695|\U0001f469\U0001f3fd\u200d\u2695|\U0001f469\U0001f3fe\u200d\u2695|\U0001f469\U0001f3ff\u200d\u2695|\U0001f468\U0001f3fb\u200d\U0001f393|\U0001f468\U0001f3fc\u200d\U0001f393|\U0001f468\U0001f3fd\u200d\U0001f393|\U0001f468\U0001f3fe\u200d\U0001f393|\U0001f468\U0001f3ff\u200d\U0001f393|\U0001f469\U0001f3fb\u200d\U0001f393|\U0001f469\U0001f3fc\u200d\U0001f393|\U0001f469\U0001f3fd\u200d\U0001f393|\U0001f469\U0001f3fe\u200d\U0001f393|\U0001f469\U0001f3ff\u200d\U0001f393|\U0001f468\U0001f3fb\u200d\U0001f3eb|\U0001f468\U0001f3fc\u200d\U0001f3eb|\U0001f468\U0001f3fd\u200d\U0001f3eb|\U0001f468\U0001f3fe\u200d\U0001f3eb|\U0001f468\U0001f3ff\u200d\U0001f3eb|\U0001f469\U0001f3fb\u200d\U0001f3eb|\U0001f469\U0001f3fc\u200d\U0001f3eb|\U0001f469\U0001f3fd\u200d\U0001f3eb|\U0001f469\U0001f3fe\u200d\U0001f3eb|\U0001f469\U0001f3ff\u200d\U0001f3eb|\U0001f468\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2696|\U0001f468\U0001f3fc\u200d\u2696|\U0001f468\U0001f3fd\u200d\u2696|\U0001f468\U0001f3fe\u200d\u2696|\U0001f468\U0001f3ff\u200d\u2696|\U0001f469\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696|\U0001f469\U0001f3fc\u200d\u2696|\U0001f469\U0001f3fd\u200d\u2696|\U0001f469\U0001f3fe\u200d\u2696|\U0001f469\U0001f3ff\u200d\u2696|\U0001f468\U0001f3fb\u200d\U0001f33e|\U0001f468\U0001f3fc\u200d\U0001f33e|\U0001f468\U0001f3fd\u200d\U0001f33e|\U0001f468\U0001f3fe\u200d\U0001f33e|\U0001f468\U0001f3ff\u200d\U0001f33e|\U0001f469\U0001f3fb\u200d\U0001f33e|\U0001f469\U0001f3fc\u200d\U0001f33e|\U0001f469\U0001f3fd\u200d\U0001f33e|\U0001f469\U0001f3fe\u200d\U0001f33e|\U0001f469\U0001f3ff\u200d\U0001f33e|\U0001f468\U0001f3fb\u200d\U0001f373|\U0001f468\U0001f3fc\u200d\U0001f373|\U0001f468\U0001f3fd\u200d\U0001f373|\U0001f468\U0001f3fe\u200d\U0001f373|\U0001f468\U0001f3ff\u200d\U0001f373|\U0001f469\U0001f3fb\u200d\U0001f373|\U0001f469\U0001f3fc\u200d\U0001f373
|\U0001f469\U0001f3fd\u200d\U0001f373|\U0001f469\U0001f3fe\u200d\U0001f373|\U0001f469\U0001f3ff\u200d\U0001f373|\U0001f468\U0001f3fb\u200d\U0001f527|\U0001f468\U0001f3fc\u200d\U0001f527|\U0001f468\U0001f3fd\u200d\U0001f527|\U0001f468\U0001f3fe\u200d\U0001f527|\U0001f468\U0001f3ff\u200d\U0001f527|\U0001f469\U0001f3fb\u200d\U0001f527|\U0001f469\U0001f3fc\u200d\U0001f527|\U0001f469\U0001f3fd\u200d\U0001f527|\U0001f469\U0001f3fe\u200d\U0001f527|\U0001f469\U0001f3ff\u200d\U0001f527|\U0001f468\U0001f3fb\u200d\U0001f3ed|\U0001f468\U0001f3fc\u200d\U0001f3ed|\U0001f468\U0001f3fd\u200d\U0001f3ed|\U0001f468\U0001f3fe\u200d\U0001f3ed|\U0001f468\U0001f3ff\u200d\U0001f3ed|\U0001f469\U0001f3fb\u200d\U0001f3ed|\U0001f469\U0001f3fc\u200d\U0001f3ed|\U0001f469\U0001f3fd\u200d\U0001f3ed|\U0001f469\U0001f3fe\u200d\U0001f3ed|\U0001f469\U0001f3ff\u200d\U0001f3ed|\U0001f468\U0001f3fb\u200d\U0001f4bc|\U0001f468\U0001f3fc\u200d\U0001f4bc|\U0001f468\U0001f3fd\u200d\U0001f4bc|\U0001f468\U0001f3fe\u200d\U0001f4bc|\U0001f468\U0001f3ff\u200d\U0001f4bc|\U0001f469\U0001f3fb\u200d\U0001f4bc|\U0001f469\U0001f3fc\u200d\U0001f4bc|\U0001f469\U0001f3fd\u200d\U0001f4bc|\U0001f469\U0001f3fe\u200d\U0001f4bc|\U0001f469\U0001f3ff\u200d\U0001f4bc|\U0001f468\U0001f3fb\u200d\U0001f52c|\U0001f468\U0001f3fc\u200d\U0001f52c|\U0001f468\U0001f3fd\u200d\U0001f52c|\U0001f468\U0001f3fe\u200d\U0001f52c|\U0001f468\U0001f3ff\u200d\U0001f52c|\U0001f469\U0001f3fb\u200d\U0001f52c|\U0001f469\U0001f3fc\u200d\U0001f52c|\U0001f469\U0001f3fd\u200d\U0001f52c|\U0001f469\U0001f3fe\u200d\U0001f52c|\U0001f469\U0001f3ff\u200d\U0001f52c|\U0001f468\U0001f3fb\u200d\U0001f4bb|\U0001f468\U0001f3fc\u200d\U0001f4bb|\U0001f468\U0001f3fd\u200d\U0001f4bb|\U0001f468\U0001f3fe\u200d\U0001f4bb|\U0001f468\U0001f3ff\u200d\U0001f4bb|\U0001f469\U0001f3fb\u200d\U0001f4bb|\U0001f469\U0001f3fc\u200d\U0001f4bb|\U0001f469\U0001f3fd\u200d\U0001f4bb|\U0001f469\U0001f3fe\u200d\U0001f4bb|\U0001f469\U0001f3ff\u200d\U0001f4bb|\U0001f468\U0001f3fb\u200d\U0001f3a4|\U0001f468\U0001f3fc\u200d\U0001f3a4|\U0001f468\U0001f3fd\u200d\U0001f3a4|\U0001f468\U0001f3fe\u200d\U0001f3a4|\U0001f468\U0001f3ff\u200d\U0001f3a4|\U0001f469\U0001f3fb\u200d\U0001f3a4|\U0001f469\U0001f3fc\u200d\U0001f3a4|\U0001f469\U0001f3fd\u200d\U0001f3a4|\U0001f469\U0001f3fe\u200d\U0001f3a4|\U0001f469\U0001f3ff\u200d\U0001f3a4|\U0001f468\U0001f3fb\u200d\U0001f3a8|\U0001f468\U0001f3fc\u200d\U0001f3a8|\U0001f468\U0001f3fd\u200d\U0001f3a8|\U0001f468\U0001f3fe\u200d\U0001f3a8|\U0001f468\U0001f3ff\u200d\U0001f3a8|\U0001f469\U0001f3fb\u200d\U0001f3a8|\U0001f469\U0001f3fc\u200d\U0001f3a8|\U0001f469\U0001f3fd\u200d\U0001f3a8|\U0001f469\U0001f3fe\u200d\U0001f3a8|\U0001f469\U0001f3ff\u200d\U0001f3a8|\U0001f468\u200d\u2708\ufe0f|\U0001f468\U0001f3fb\u200d\u2708|\U0001f468\U0001f3fc\u200d\u2708|\U0001f468\U0001f3fd\u200d\u2708|\U0001f468\U0001f3fe\u200d\u2708|\U0001f468\U0001f3ff\u200d\u2708|\U0001f469\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708|\U0001f469\U0001f3fc\u200d\u2708|\U0001f469\U0001f3fd\u200d\u2708|\U0001f469\U0001f3fe\u200d\u2708|\U0001f469\U0001f3ff\u200d\u2708|\U0001f468\U0001f3fb\u200d\U0001f680|\U0001f468\U0001f3fc\u200d\U0001f680|\U0001f468\U0001f3fd\u200d\U0001f680|\U0001f468\U0001f3fe\u200d\U0001f680|\U0001f468\U0001f3ff\u200d\U0001f680|\U0001f469\U0001f3fb\u200d\U0001f680|\U0001f469\U0001f3fc\u200d\U0001f680|\U0001f469\U0001f3fd\u200d\U0001f680|\U0001f469\U0001f3fe\u200d\U0001f680|\U0001f469\U0001f3ff\u200d\U0001f680|\U0001f468\U0001f3fb\u200d\U0001f692|\U0001f468\U0001f3fc\u200d\U0001f692|\U0001f468\U0001f3fd\
u200d\U0001f692|\U0001f468\U0001f3fe\u200d\U0001f692|\U0001f468\U0001f3ff\u200d\U0001f692|\U0001f469\U0001f3fb\u200d\U0001f692|\U0001f469\U0001f3fc\u200d\U0001f692|\U0001f469\U0001f3fd\u200d\U0001f692|\U0001f469\U0001f3fe\u200d\U0001f692|\U0001f469\U0001f3ff\u200d\U0001f692|\U0001f46e\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642|\U0001f46e\U0001f3fc\u200d\u2642|\U0001f46e\U0001f3fd\u200d\u2642|\U0001f46e\U0001f3fe\u200d\u2642|\U0001f46e\U0001f3ff\u200d\u2642|\U0001f46e\u200d\u2640\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640|\U0001f46e\U0001f3fc\u200d\u2640|\U0001f46e\U0001f3fd\u200d\u2640|\U0001f46e\U0001f3fe\u200d\u2640|\U0001f46e\U0001f3ff\u200d\u2640|\U0001f575\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2642|\U0001f575\U0001f3fb\u200d\u2642|\U0001f575\U0001f3fc\u200d\u2642|\U0001f575\U0001f3fd\u200d\u2642|\U0001f575\U0001f3fe\u200d\u2642|\U0001f575\U0001f3ff\u200d\u2642|\U0001f575\u200d\u2640\ufe0f|\U0001f575\ufe0f\u200d\u2640|\U0001f575\U0001f3fb\u200d\u2640|\U0001f575\U0001f3fc\u200d\u2640|\U0001f575\U0001f3fd\u200d\u2640|\U0001f575\U0001f3fe\u200d\u2640|\U0001f575\U0001f3ff\u200d\u2640|\U0001f482\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2642|\U0001f482\U0001f3fc\u200d\u2642|\U0001f482\U0001f3fd\u200d\u2642|\U0001f482\U0001f3fe\u200d\u2642|\U0001f482\U0001f3ff\u200d\u2642|\U0001f482\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2640|\U0001f482\U0001f3fc\u200d\u2640|\U0001f482\U0001f3fd\u200d\u2640|\U0001f482\U0001f3fe\u200d\u2640|\U0001f482\U0001f3ff\u200d\u2640|\U0001f477\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2642|\U0001f477\U0001f3fc\u200d\u2642|\U0001f477\U0001f3fd\u200d\u2642|\U0001f477\U0001f3fe\u200d\u2642|\U0001f477\U0001f3ff\u200d\u2642|\U0001f477\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2640|\U0001f477\U0001f3fc\u200d\u2640|\U0001f477\U0001f3fd\u200d\u2640|\U0001f477\U0001f3fe\u200d\u2640|\U0001f477\U0001f3ff\u200d\u2640|\U0001f473\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2642|\U0001f473\U0001f3fc\u200d\u2642|\U0001f473\U0001f3fd\u200d\u2642|\U0001f473\U0001f3fe\u200d\u2642|\U0001f473\U0001f3ff\u200d\u2642|\U0001f473\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2640|\U0001f473\U0001f3fc\u200d\u2640|\U0001f473\U0001f3fd\u200d\u2640|\U0001f473\U0001f3fe\u200d\u2640|\U0001f473\U0001f3ff\u200d\u2640|\U0001f9b8\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642|\U0001f9b8\U0001f3fc\u200d\u2642|\U0001f9b8\U0001f3fd\u200d\u2642|\U0001f9b8\U0001f3fe\u200d\u2642|\U0001f9b8\U0001f3ff\u200d\u2642|\U0001f9b8\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640|\U0001f9b8\U0001f3fc\u200d\u2640|\U0001f9b8\U0001f3fd\u200d\u2640|\U0001f9b8\U0001f3fe\u200d\u2640|\U0001f9b8\U0001f3ff\u200d\u2640|\U0001f9b9\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642|\U0001f9b9\U0001f3fc\u200d\u2642|\U0001f9b9\U0001f3fd\u200d\u2642|\U0001f9b9\U0001f3fe\u200d\u2642|\U0001f9b9\U0001f3ff\u200d\u2642|\U0001f9b9\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640|\U0001f9b9\U0001f3fc\u200d\u2640|\U0001f9b9\U0001f3fd\u200d\u2640|\U0001f9b9\U0001f3fe\u200d\u2640|\U0001f9b9\U0001f3ff\u200d\u2640|\U0001f9d9\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642|\U0001f9d9\U0001f3fc\u200d\u2642|\U0001f9d9\U0001f3fd\u200d\u2642|\U0001f9d9\U0001f3fe\u200d\u2642|\U0001f9d9\U0001f3ff\u200d\u2642|\U0001f9d9\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640|\U0001f9d9\U0001f3fc\u200d\u2640|\U0001f9d9\U0001f3fd\u200d\u2640|\U0001f9d9\U0001f3fe\u200d\u2640|\U0001f9d9\U0001f3ff\u200d\u2640|\U0001f9da\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642|\U0001f9da\U0001f3fc\u200d\u2642|\U0001f9da\U0001f3fd\u2
00d\u2642|\U0001f9da\U0001f3fe\u200d\u2642|\U0001f9da\U0001f3ff\u200d\u2642|\U0001f9da\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640|\U0001f9da\U0001f3fc\u200d\u2640|\U0001f9da\U0001f3fd\u200d\u2640|\U0001f9da\U0001f3fe\u200d\u2640|\U0001f9da\U0001f3ff\u200d\u2640|\U0001f9db\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642|\U0001f9db\U0001f3fc\u200d\u2642|\U0001f9db\U0001f3fd\u200d\u2642|\U0001f9db\U0001f3fe\u200d\u2642|\U0001f9db\U0001f3ff\u200d\u2642|\U0001f9db\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640|\U0001f9db\U0001f3fc\u200d\u2640|\U0001f9db\U0001f3fd\u200d\u2640|\U0001f9db\U0001f3fe\u200d\u2640|\U0001f9db\U0001f3ff\u200d\u2640|\U0001f9dc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642|\U0001f9dc\U0001f3fc\u200d\u2642|\U0001f9dc\U0001f3fd\u200d\u2642|\U0001f9dc\U0001f3fe\u200d\u2642|\U0001f9dc\U0001f3ff\u200d\u2642|\U0001f9dc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640|\U0001f9dc\U0001f3fc\u200d\u2640|\U0001f9dc\U0001f3fd\u200d\u2640|\U0001f9dc\U0001f3fe\u200d\u2640|\U0001f9dc\U0001f3ff\u200d\u2640|\U0001f9dd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642|\U0001f9dd\U0001f3fc\u200d\u2642|\U0001f9dd\U0001f3fd\u200d\u2642|\U0001f9dd\U0001f3fe\u200d\u2642|\U0001f9dd\U0001f3ff\u200d\u2642|\U0001f9dd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640|\U0001f9dd\U0001f3fc\u200d\u2640|\U0001f9dd\U0001f3fd\u200d\u2640|\U0001f9dd\U0001f3fe\u200d\u2640|\U0001f9dd\U0001f3ff\u200d\u2640|\U0001f9de\u200d\u2642\ufe0f|\U0001f9de\u200d\u2640\ufe0f|\U0001f9df\u200d\u2642\ufe0f|\U0001f9df\u200d\u2640\ufe0f|\U0001f486\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2642|\U0001f486\U0001f3fc\u200d\u2642|\U0001f486\U0001f3fd\u200d\u2642|\U0001f486\U0001f3fe\u200d\u2642|\U0001f486\U0001f3ff\u200d\u2642|\U0001f486\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2640|\U0001f486\U0001f3fc\u200d\u2640|\U0001f486\U0001f3fd\u200d\u2640|\U0001f486\U0001f3fe\u200d\u2640|\U0001f486\U0001f3ff\u200d\u2640|\U0001f487\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2642|\U0001f487\U0001f3fc\u200d\u2642|\U0001f487\U0001f3fd\u200d\u2642|\U0001f487\U0001f3fe\u200d\u2642|\U0001f487\U0001f3ff\u200d\u2642|\U0001f487\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2640|\U0001f487\U0001f3fc\u200d\u2640|\U0001f487\U0001f3fd\u200d\u2640|\U0001f487\U0001f3fe\u200d\u2640|\U0001f487\U0001f3ff\u200d\u2640|\U0001f6b6\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642|\U0001f6b6\U0001f3fc\u200d\u2642|\U0001f6b6\U0001f3fd\u200d\u2642|\U0001f6b6\U0001f3fe\u200d\u2642|\U0001f6b6\U0001f3ff\u200d\u2642|\U0001f6b6\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640|\U0001f6b6\U0001f3fc\u200d\u2640|\U0001f6b6\U0001f3fd\u200d\u2640|\U0001f6b6\U0001f3fe\u200d\u2640|\U0001f6b6\U0001f3ff\u200d\u2640|\U0001f9cd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642|\U0001f9cd\U0001f3fc\u200d\u2642|\U0001f9cd\U0001f3fd\u200d\u2642|\U0001f9cd\U0001f3fe\u200d\u2642|\U0001f9cd\U0001f3ff\u200d\u2642|\U0001f9cd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640|\U0001f9cd\U0001f3fc\u200d\u2640|\U0001f9cd\U0001f3fd\u200d\u2640|\U0001f9cd\U0001f3fe\u200d\u2640|\U0001f9cd\U0001f3ff\u200d\u2640|\U0001f9ce\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642|\U0001f9ce\U0001f3fc\u200d\u2642|\U0001f9ce\U0001f3fd\u200d\u2642|\U0001f9ce\U0001f3fe\u200d\u2642|\U0001f9ce\U0001f3ff\u200d\u2642|\U0001f9ce\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640|\U0001f9ce\U0001f3fc\u200d\u2640|\U0001f9ce\U0001f3fd\u200d\u2640|\U0001f9ce\U0001f3fe\u200d\u2640|\U0001f9ce\U0001f3ff\u200d\u2640|\U0001f468\U0001f3fb\u200d\U0001f9af|\U0001f468\U0001f3fc\u200d\U
0001f9af|\U0001f468\U0001f3fd\u200d\U0001f9af|\U0001f468\U0001f3fe\u200d\U0001f9af|\U0001f468\U0001f3ff\u200d\U0001f9af|\U0001f469\U0001f3fb\u200d\U0001f9af|\U0001f469\U0001f3fc\u200d\U0001f9af|\U0001f469\U0001f3fd\u200d\U0001f9af|\U0001f469\U0001f3fe\u200d\U0001f9af|\U0001f469\U0001f3ff\u200d\U0001f9af|\U0001f468\U0001f3fb\u200d\U0001f9bc|\U0001f468\U0001f3fc\u200d\U0001f9bc|\U0001f468\U0001f3fd\u200d\U0001f9bc|\U0001f468\U0001f3fe\u200d\U0001f9bc|\U0001f468\U0001f3ff\u200d\U0001f9bc|\U0001f469\U0001f3fb\u200d\U0001f9bc|\U0001f469\U0001f3fc\u200d\U0001f9bc|\U0001f469\U0001f3fd\u200d\U0001f9bc|\U0001f469\U0001f3fe\u200d\U0001f9bc|\U0001f469\U0001f3ff\u200d\U0001f9bc|\U0001f468\U0001f3fb\u200d\U0001f9bd|\U0001f468\U0001f3fc\u200d\U0001f9bd|\U0001f468\U0001f3fd\u200d\U0001f9bd|\U0001f468\U0001f3fe\u200d\U0001f9bd|\U0001f468\U0001f3ff\u200d\U0001f9bd|\U0001f469\U0001f3fb\u200d\U0001f9bd|\U0001f469\U0001f3fc\u200d\U0001f9bd|\U0001f469\U0001f3fd\u200d\U0001f9bd|\U0001f469\U0001f3fe\u200d\U0001f9bd|\U0001f469\U0001f3ff\u200d\U0001f9bd|\U0001f3c3\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642|\U0001f3c3\U0001f3fc\u200d\u2642|\U0001f3c3\U0001f3fd\u200d\u2642|\U0001f3c3\U0001f3fe\u200d\u2642|\U0001f3c3\U0001f3ff\u200d\u2642|\U0001f3c3\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640|\U0001f3c3\U0001f3fc\u200d\u2640|\U0001f3c3\U0001f3fd\u200d\u2640|\U0001f3c3\U0001f3fe\u200d\u2640|\U0001f3c3\U0001f3ff\u200d\u2640|\U0001f46f\u200d\u2642\ufe0f|\U0001f46f\u200d\u2640\ufe0f|\U0001f9d6\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642|\U0001f9d6\U0001f3fc\u200d\u2642|\U0001f9d6\U0001f3fd\u200d\u2642|\U0001f9d6\U0001f3fe\u200d\u2642|\U0001f9d6\U0001f3ff\u200d\u2642|\U0001f9d6\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640|\U0001f9d6\U0001f3fc\u200d\u2640|\U0001f9d6\U0001f3fd\u200d\u2640|\U0001f9d6\U0001f3fe\u200d\u2640|\U0001f9d6\U0001f3ff\u200d\u2640|\U0001f9d7\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642|\U0001f9d7\U0001f3fc\u200d\u2642|\U0001f9d7\U0001f3fd\u200d\u2642|\U0001f9d7\U0001f3fe\u200d\u2642|\U0001f9d7\U0001f3ff\u200d\u2642|\U0001f9d7\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640|\U0001f9d7\U0001f3fc\u200d\u2640|\U0001f9d7\U0001f3fd\u200d\u2640|\U0001f9d7\U0001f3fe\u200d\u2640|\U0001f9d7\U0001f3ff\u200d\u2640|\U0001f3cc\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2642|\U0001f3cc\U0001f3fb\u200d\u2642|\U0001f3cc\U0001f3fc\u200d\u2642|\U0001f3cc\U0001f3fd\u200d\u2642|\U0001f3cc\U0001f3fe\u200d\u2642|\U0001f3cc\U0001f3ff\u200d\u2642|\U0001f3cc\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2640|\U0001f3cc\U0001f3fb\u200d\u2640|\U0001f3cc\U0001f3fc\u200d\u2640|\U0001f3cc\U0001f3fd\u200d\u2640|\U0001f3cc\U0001f3fe\u200d\u2640|\U0001f3cc\U0001f3ff\u200d\u2640|\U0001f3c4\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642|\U0001f3c4\U0001f3fc\u200d\u2642|\U0001f3c4\U0001f3fd\u200d\u2642|\U0001f3c4\U0001f3fe\u200d\u2642|\U0001f3c4\U0001f3ff\u200d\u2642|\U0001f3c4\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640|\U0001f3c4\U0001f3fc\u200d\u2640|\U0001f3c4\U0001f3fd\u200d\u2640|\U0001f3c4\U0001f3fe\u200d\u2640|\U0001f3c4\U0001f3ff\u200d\u2640|\U0001f6a3\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642|\U0001f6a3\U0001f3fc\u200d\u2642|\U0001f6a3\U0001f3fd\u200d\u2642|\U0001f6a3\U0001f3fe\u200d\u2642|\U0001f6a3\U0001f3ff\u200d\u2642|\U0001f6a3\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640|\U0001f6a3\U0001f3fc\u200d\u2640|\U0001f6a3\U0001f3fd\u200d\u2640|\U0001f6a3\U0001f3fe\u200d\u2640|\U0001f6a3\U0001f3ff\u200d\u2640|\U0001f3ca\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642|\U00
01f3ca\U0001f3fc\u200d\u2642|\U0001f3ca\U0001f3fd\u200d\u2642|\U0001f3ca\U0001f3fe\u200d\u2642|\U0001f3ca\U0001f3ff\u200d\u2642|\U0001f3ca\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640|\U0001f3ca\U0001f3fc\u200d\u2640|\U0001f3ca\U0001f3fd\u200d\u2640|\U0001f3ca\U0001f3fe\u200d\u2640|\U0001f3ca\U0001f3ff\u200d\u2640|\u26f9\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2642|\u26f9\U0001f3fb\u200d\u2642|\u26f9\U0001f3fc\u200d\u2642|\u26f9\U0001f3fd\u200d\u2642|\u26f9\U0001f3fe\u200d\u2642|\u26f9\U0001f3ff\u200d\u2642|\u26f9\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2640|\u26f9\U0001f3fb\u200d\u2640|\u26f9\U0001f3fc\u200d\u2640|\u26f9\U0001f3fd\u200d\u2640|\u26f9\U0001f3fe\u200d\u2640|\u26f9\U0001f3ff\u200d\u2640|\U0001f3cb\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2642|\U0001f3cb\U0001f3fb\u200d\u2642|\U0001f3cb\U0001f3fc\u200d\u2642|\U0001f3cb\U0001f3fd\u200d\u2642|\U0001f3cb\U0001f3fe\u200d\u2642|\U0001f3cb\U0001f3ff\u200d\u2642|\U0001f3cb\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2640|\U0001f3cb\U0001f3fb\u200d\u2640|\U0001f3cb\U0001f3fc\u200d\u2640|\U0001f3cb\U0001f3fd\u200d\u2640|\U0001f3cb\U0001f3fe\u200d\u2640|\U0001f3cb\U0001f3ff\u200d\u2640|\U0001f6b4\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642|\U0001f6b4\U0001f3fc\u200d\u2642|\U0001f6b4\U0001f3fd\u200d\u2642|\U0001f6b4\U0001f3fe\u200d\u2642|\U0001f6b4\U0001f3ff\u200d\u2642|\U0001f6b4\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640|\U0001f6b4\U0001f3fc\u200d\u2640|\U0001f6b4\U0001f3fd\u200d\u2640|\U0001f6b4\U0001f3fe\u200d\u2640|\U0001f6b4\U0001f3ff\u200d\u2640|\U0001f6b5\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642|\U0001f6b5\U0001f3fc\u200d\u2642|\U0001f6b5\U0001f3fd\u200d\u2642|\U0001f6b5\U0001f3fe\u200d\u2642|\U0001f6b5\U0001f3ff\u200d\u2642|\U0001f6b5\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640|\U0001f6b5\U0001f3fc\u200d\u2640|\U0001f6b5\U0001f3fd\u200d\u2640|\U0001f6b5\U0001f3fe\u200d\u2640|\U0001f6b5\U0001f3ff\u200d\u2640|\U0001f938\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2642|\U0001f938\U0001f3fc\u200d\u2642|\U0001f938\U0001f3fd\u200d\u2642|\U0001f938\U0001f3fe\u200d\u2642|\U0001f938\U0001f3ff\u200d\u2642|\U0001f938\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2640|\U0001f938\U0001f3fc\u200d\u2640|\U0001f938\U0001f3fd\u200d\u2640|\U0001f938\U0001f3fe\u200d\u2640|\U0001f938\U0001f3ff\u200d\u2640|\U0001f93c\u200d\u2642\ufe0f|\U0001f93c\u200d\u2640\ufe0f|\U0001f93d\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642|\U0001f93d\U0001f3fc\u200d\u2642|\U0001f93d\U0001f3fd\u200d\u2642|\U0001f93d\U0001f3fe\u200d\u2642|\U0001f93d\U0001f3ff\u200d\u2642|\U0001f93d\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640|\U0001f93d\U0001f3fc\u200d\u2640|\U0001f93d\U0001f3fd\u200d\u2640|\U0001f93d\U0001f3fe\u200d\u2640|\U0001f93d\U0001f3ff\u200d\u2640|\U0001f93e\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642|\U0001f93e\U0001f3fc\u200d\u2642|\U0001f93e\U0001f3fd\u200d\u2642|\U0001f93e\U0001f3fe\u200d\u2642|\U0001f93e\U0001f3ff\u200d\u2642|\U0001f93e\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640|\U0001f93e\U0001f3fc\u200d\u2640|\U0001f93e\U0001f3fd\u200d\u2640|\U0001f93e\U0001f3fe\u200d\u2640|\U0001f93e\U0001f3ff\u200d\u2640|\U0001f939\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2642|\U0001f939\U0001f3fc\u200d\u2642|\U0001f939\U0001f3fd\u200d\u2642|\U0001f939\U0001f3fe\u200d\u2642|\U0001f939\U0001f3ff\u200d\u2642|\U0001f939\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2640|\U0001f939\U0001f3fc\u200d\u2640|\U0001f939\U0001f3fd\u200d\u2640|\U0001f939\U0001f3fe\u200d\u2640|\U0001f939\U0001f3ff\u200d\u2640|\U0001f9d8\
u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642|\U0001f9d8\U0001f3fc\u200d\u2642|\U0001f9d8\U0001f3fd\u200d\u2642|\U0001f9d8\U0001f3fe\u200d\u2642|\U0001f9d8\U0001f3ff\u200d\u2642|\U0001f9d8\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640|\U0001f9d8\U0001f3fc\u200d\u2640|\U0001f9d8\U0001f3fd\u200d\u2640|\U0001f9d8\U0001f3fe\u200d\u2640|\U0001f9d8\U0001f3ff\u200d\u2640|\U0001f3f3\ufe0f\u200d\U0001f308|\U0001f3f4\u200d\u2620\ufe0f|\U0001f441\u200d\U0001f5e8|\U0001f471\u200d\u2642|\U0001f468\u200d\U0001f9b0|\U0001f468\u200d\U0001f9b1|\U0001f468\u200d\U0001f9b3|\U0001f468\u200d\U0001f9b2|\U0001f471\u200d\u2640|\U0001f469\u200d\U0001f9b0|\U0001f469\u200d\U0001f9b1|\U0001f469\u200d\U0001f9b3|\U0001f469\u200d\U0001f9b2|\U0001f64d\u200d\u2642|\U0001f64d\u200d\u2640|\U0001f64e\u200d\u2642|\U0001f64e\u200d\u2640|\U0001f645\u200d\u2642|\U0001f645\u200d\u2640|\U0001f646\u200d\u2642|\U0001f646\u200d\u2640|\U0001f481\u200d\u2642|\U0001f481\u200d\u2640|\U0001f64b\u200d\u2642|\U0001f64b\u200d\u2640|\U0001f9cf\u200d\u2642|\U0001f9cf\u200d\u2640|\U0001f647\u200d\u2642|\U0001f647\u200d\u2640|\U0001f926\u200d\u2642|\U0001f926\u200d\u2640|\U0001f937\u200d\u2642|\U0001f937\u200d\u2640|\U0001f468\u200d\u2695|\U0001f469\u200d\u2695|\U0001f468\u200d\U0001f393|\U0001f469\u200d\U0001f393|\U0001f468\u200d\U0001f3eb|\U0001f469\u200d\U0001f3eb|\U0001f468\u200d\u2696|\U0001f469\u200d\u2696|\U0001f468\u200d\U0001f33e|\U0001f469\u200d\U0001f33e|\U0001f468\u200d\U0001f373|\U0001f469\u200d\U0001f373|\U0001f468\u200d\U0001f527|\U0001f469\u200d\U0001f527|\U0001f468\u200d\U0001f3ed|\U0001f469\u200d\U0001f3ed|\U0001f468\u200d\U0001f4bc|\U0001f469\u200d\U0001f4bc|\U0001f468\u200d\U0001f52c|\U0001f469\u200d\U0001f52c|\U0001f468\u200d\U0001f4bb|\U0001f469\u200d\U0001f4bb|\U0001f468\u200d\U0001f3a4|\U0001f469\u200d\U0001f3a4|\U0001f468\u200d\U0001f3a8|\U0001f469\u200d\U0001f3a8|\U0001f468\u200d\u2708|\U0001f469\u200d\u2708|\U0001f468\u200d\U0001f680|\U0001f469\u200d\U0001f680|\U0001f468\u200d\U0001f692|\U0001f469\u200d\U0001f692|\U0001f46e\u200d\u2642|\U0001f46e\u200d\u2640|\U0001f575\u200d\u2642|\U0001f575\u200d\u2640|\U0001f482\u200d\u2642|\U0001f482\u200d\u2640|\U0001f477\u200d\u2642|\U0001f477\u200d\u2640|\U0001f473\u200d\u2642|\U0001f473\u200d\u2640|\U0001f9b8\u200d\u2642|\U0001f9b8\u200d\u2640|\U0001f9b9\u200d\u2642|\U0001f9b9\u200d\u2640|\U0001f9d9\u200d\u2642|\U0001f9d9\u200d\u2640|\U0001f9da\u200d\u2642|\U0001f9da\u200d\u2640|\U0001f9db\u200d\u2642|\U0001f9db\u200d\u2640|\U0001f9dc\u200d\u2642|\U0001f9dc\u200d\u2640|\U0001f9dd\u200d\u2642|\U0001f9dd\u200d\u2640|\U0001f9de\u200d\u2642|\U0001f9de\u200d\u2640|\U0001f9df\u200d\u2642|\U0001f9df\u200d\u2640|\U0001f486\u200d\u2642|\U0001f486\u200d\u2640|\U0001f487\u200d\u2642|\U0001f487\u200d\u2640|\U0001f6b6\u200d\u2642|\U0001f6b6\u200d\u2640|\U0001f9cd\u200d\u2642|\U0001f9cd\u200d\u2640|\U0001f9ce\u200d\u2642|\U0001f9ce\u200d\u2640|\U0001f468\u200d\U0001f9af|\U0001f469\u200d\U0001f9af|\U0001f468\u200d\U0001f9bc|\U0001f469\u200d\U0001f9bc|\U0001f468\u200d\U0001f9bd|\U0001f469\u200d\U0001f9bd|\U0001f3c3\u200d\u2642|\U0001f3c3\u200d\u2640|\U0001f46f\u200d\u2642|\U0001f46f\u200d\u2640|\U0001f9d6\u200d\u2642|\U0001f9d6\u200d\u2640|\U0001f9d7\u200d\u2642|\U0001f9d7\u200d\u2640|\U0001f3cc\u200d\u2642|\U0001f3cc\u200d\u2640|\U0001f3c4\u200d\u2642|\U0001f3c4\u200d\u2640|\U0001f6a3\u200d\u2642|\U0001f6a3\u200d\u2640|\U0001f3ca\u200d\u2642|\U0001f3ca\u200d\u2640|\u26f9\u200d\u2642|\u26f9\u200d\u2640|\U0001f3cb\u200d\u2642|\U0001f3cb\u200d\u2640|\U0001f6b4\u200d\u2642|\U0001f6b4\u200d
\u2640|\U0001f6b5\u200d\u2642|\U0001f6b5\u200d\u2640|\U0001f938\u200d\u2642|\U0001f938\u200d\u2640|\U0001f93c\u200d\u2642|\U0001f93c\u200d\u2640|\U0001f93d\u200d\u2642|\U0001f93d\u200d\u2640|\U0001f93e\u200d\u2642|\U0001f93e\u200d\u2640|\U0001f939\u200d\u2642|\U0001f939\u200d\u2640|\U0001f9d8\u200d\u2642|\U0001f9d8\u200d\u2640|\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f467|\U0001f415\u200d\U0001f9ba|\\#\ufe0f\u20e3|\\*\ufe0f\u20e3|0\ufe0f\u20e3|1\ufe0f\u20e3|2\ufe0f\u20e3|3\ufe0f\u20e3|4\ufe0f\u20e3|5\ufe0f\u20e3|6\ufe0f\u20e3|7\ufe0f\u20e3|8\ufe0f\u20e3|9\ufe0f\u20e3|\U0001f3f3\u200d\U0001f308|\U0001f3f4\u200d\u2620|\u263a\ufe0f|\u2639\ufe0f|\u2620\ufe0f|\u2763\ufe0f|\u2764\ufe0f|\U0001f573\ufe0f|\U0001f5e8\ufe0f|\U0001f5ef\ufe0f|\U0001f44b\U0001f3fb|\U0001f44b\U0001f3fc|\U0001f44b\U0001f3fd|\U0001f44b\U0001f3fe|\U0001f44b\U0001f3ff|\U0001f91a\U0001f3fb|\U0001f91a\U0001f3fc|\U0001f91a\U0001f3fd|\U0001f91a\U0001f3fe|\U0001f91a\U0001f3ff|\U0001f590\ufe0f|\U0001f590\U0001f3fb|\U0001f590\U0001f3fc|\U0001f590\U0001f3fd|\U0001f590\U0001f3fe|\U0001f590\U0001f3ff|\u270b\U0001f3fb|\u270b\U0001f3fc|\u270b\U0001f3fd|\u270b\U0001f3fe|\u270b\U0001f3ff|\U0001f596\U0001f3fb|\U0001f596\U0001f3fc|\U0001f596\U0001f3fd|\U0001f596\U0001f3fe|\U0001f596\U0001f3ff|\U0001f44c\U0001f3fb|\U0001f44c\U0001f3fc|\U0001f44c\U0001f3fd|\U0001f44c\U0001f3fe|\U0001f44c\U0001f3ff|\U0001f90f\U0001f3fb|\U0001f90f\U0001f3fc|\U0001f90f\U0001f3fd|\U0001f90f\U0001f3fe|\U0001f90f\U0001f3ff|\u270c\ufe0f|\u270c\U0001f3fb|\u270c\U0001f3fc|\u270c\U0001f3fd|\u270c\U0001f3fe|\u270c\U0001f3ff|\U0001f91e\U0001f3fb|\U0001f91e\U0001f3fc|\U0001f91e\U0001f3fd|\U0001f91e\U0001f3fe|\U0001f91e\U0001f3ff|\U0001f91f\U0001f3fb|\U0001f91f\U0001f3fc|\U0001f91f\U0001f3fd|\U0001f91f\U0001f3fe|\U0001f91f\U0001f3ff|\U0001f918\U0001f3fb|\U0001f918\U0001f3fc|\U0001f918\U0001f3fd|\U0001f918\U0001f3fe|\U0001f918\U0001f3ff|\U0001f919\U0001f3fb|\U0001f919\U0001f3fc|\U0001f919\U0001f3fd|\U0001f919\U0001f3fe|\U0001f919\U0001f3ff|\U0001f448\U0001f3fb|\U0001f448\U0001f3fc|\U0001f448\U0001f3fd|\U0001f448\U0001f3fe|\U0001f448\U0001f3ff|\U0001f449\U0001f3fb|\U0001f449\U0001f3fc|\U0001f449\U0001f3fd|\U0001f449\U0001f3fe|\U0001f449\U0001f3ff|\U0001f446\U0001f3fb|\U0001f446\U0001f3fc|\U0001f446\U0001f3fd|\U0001f446\U0001f3fe|\U0001f446\U0001f3ff|\U0001f595\U0001f3fb|\U0001f595\U0001f3fc|\U0001f595\U0001f3fd|\U0001f595\U0001f3fe|\U0001f595\U0001f3ff|\U0001f447\U0001f3fb|\U0001f447\U0001f3fc|\U0001f447\U0001f3fd|\U0001f447\U0001f3fe|\U0001f447\U0001f3ff|\u261d\ufe0f|\u261d\U0001f3fb|\u261d\U0001f3fc|\u261d\U0001f3fd|\u261d\U0001f3fe|\u261d\U0001f3ff|\U0001f44d\U0001f3fb|\U0001f44d\U0001f3fc|\U0001f44d\U0001f3fd|\U0001f44d\U0001f3fe|\U0001f44d\U0001f3ff|\U0001f44e\U0001f3fb|\U0001f44e\U0001f3fc|\U0001f44e\U0001f3fd|\U0001f44e\U0001f3fe|\U0001f44e\U0001f3ff|\u270a\U0001f3fb|\u270a\U0001f3fc|\u270a\U0001f3fd|\u270a\U0001f3fe|\u270a\U0001f3ff|\U0001f44a\U0001f3fb|\U0001f44a\U0001f3fc|\U0001f44a\U0001f3fd|\U0001f44a\U0001f3fe|\U0001f44a\U0001f3ff|\U0001f91b\U0001f3fb|\U0001f91b\U0001f3fc|\U0001f91b\U0001f3fd|\U0001f91b\U0001f3fe|\U0001f91b\U0001f3ff|\U0001f91c\U0001f3fb|\U0001f91c\U0001f3fc|\U0001f91c\U0001f3fd|\U0001f91c\U0001f3fe|\U0001f91c\U0001f3ff|\U0001f44f\U0001f3fb|\U0001f44f\U0001f3fc|\U0001f44f\U0001f3fd|\U0001f44f\U0001f3fe|\U0001f44f\U0001f3ff|\U0001f64c\U0001f3fb|\U0001f64c\U0001f3fc|\U0001f64c\U0001f3fd|\U0001f64c\U0001f3fe|\U0001f64c\U0001f3ff|\U0001f450\U0001f3fb|\U0001f450\U0001f3fc|\U0001f450\U0001f3
fd|\U0001f450\U0001f3fe|\U0001f450\U0001f3ff|\U0001f932\U0001f3fb|\U0001f932\U0001f3fc|\U0001f932\U0001f3fd|\U0001f932\U0001f3fe|\U0001f932\U0001f3ff|\U0001f64f\U0001f3fb|\U0001f64f\U0001f3fc|\U0001f64f\U0001f3fd|\U0001f64f\U0001f3fe|\U0001f64f\U0001f3ff|\u270d\ufe0f|\u270d\U0001f3fb|\u270d\U0001f3fc|\u270d\U0001f3fd|\u270d\U0001f3fe|\u270d\U0001f3ff|\U0001f485\U0001f3fb|\U0001f485\U0001f3fc|\U0001f485\U0001f3fd|\U0001f485\U0001f3fe|\U0001f485\U0001f3ff|\U0001f933\U0001f3fb|\U0001f933\U0001f3fc|\U0001f933\U0001f3fd|\U0001f933\U0001f3fe|\U0001f933\U0001f3ff|\U0001f4aa\U0001f3fb|\U0001f4aa\U0001f3fc|\U0001f4aa\U0001f3fd|\U0001f4aa\U0001f3fe|\U0001f4aa\U0001f3ff|\U0001f9b5\U0001f3fb|\U0001f9b5\U0001f3fc|\U0001f9b5\U0001f3fd|\U0001f9b5\U0001f3fe|\U0001f9b5\U0001f3ff|\U0001f9b6\U0001f3fb|\U0001f9b6\U0001f3fc|\U0001f9b6\U0001f3fd|\U0001f9b6\U0001f3fe|\U0001f9b6\U0001f3ff|\U0001f442\U0001f3fb|\U0001f442\U0001f3fc|\U0001f442\U0001f3fd|\U0001f442\U0001f3fe|\U0001f442\U0001f3ff|\U0001f9bb\U0001f3fb|\U0001f9bb\U0001f3fc|\U0001f9bb\U0001f3fd|\U0001f9bb\U0001f3fe|\U0001f9bb\U0001f3ff|\U0001f443\U0001f3fb|\U0001f443\U0001f3fc|\U0001f443\U0001f3fd|\U0001f443\U0001f3fe|\U0001f443\U0001f3ff|\U0001f441\ufe0f|\U0001f476\U0001f3fb|\U0001f476\U0001f3fc|\U0001f476\U0001f3fd|\U0001f476\U0001f3fe|\U0001f476\U0001f3ff|\U0001f9d2\U0001f3fb|\U0001f9d2\U0001f3fc|\U0001f9d2\U0001f3fd|\U0001f9d2\U0001f3fe|\U0001f9d2\U0001f3ff|\U0001f466\U0001f3fb|\U0001f466\U0001f3fc|\U0001f466\U0001f3fd|\U0001f466\U0001f3fe|\U0001f466\U0001f3ff|\U0001f467\U0001f3fb|\U0001f467\U0001f3fc|\U0001f467\U0001f3fd|\U0001f467\U0001f3fe|\U0001f467\U0001f3ff|\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff|\U0001f471\U0001f3fb|\U0001f471\U0001f3fc|\U0001f471\U0001f3fd|\U0001f471\U0001f3fe|\U0001f471\U0001f3ff|\U0001f468\U0001f3fb|\U0001f468\U0001f3fc|\U0001f468\U0001f3fd|\U0001f468\U0001f3fe|\U0001f468\U0001f3ff|\U0001f9d4\U0001f3fb|\U0001f9d4\U0001f3fc|\U0001f9d4\U0001f3fd|\U0001f9d4\U0001f3fe|\U0001f9d4\U0001f3ff|\U0001f469\U0001f3fb|\U0001f469\U0001f3fc|\U0001f469\U0001f3fd|\U0001f469\U0001f3fe|\U0001f469\U0001f3ff|\U0001f9d3\U0001f3fb|\U0001f9d3\U0001f3fc|\U0001f9d3\U0001f3fd|\U0001f9d3\U0001f3fe|\U0001f9d3\U0001f3ff|\U0001f474\U0001f3fb|\U0001f474\U0001f3fc|\U0001f474\U0001f3fd|\U0001f474\U0001f3fe|\U0001f474\U0001f3ff|\U0001f475\U0001f3fb|\U0001f475\U0001f3fc|\U0001f475\U0001f3fd|\U0001f475\U0001f3fe|\U0001f475\U0001f3ff|\U0001f64d\U0001f3fb|\U0001f64d\U0001f3fc|\U0001f64d\U0001f3fd|\U0001f64d\U0001f3fe|\U0001f64d\U0001f3ff|\U0001f64e\U0001f3fb|\U0001f64e\U0001f3fc|\U0001f64e\U0001f3fd|\U0001f64e\U0001f3fe|\U0001f64e\U0001f3ff|\U0001f645\U0001f3fb|\U0001f645\U0001f3fc|\U0001f645\U0001f3fd|\U0001f645\U0001f3fe|\U0001f645\U0001f3ff|\U0001f646\U0001f3fb|\U0001f646\U0001f3fc|\U0001f646\U0001f3fd|\U0001f646\U0001f3fe|\U0001f646\U0001f3ff|\U0001f481\U0001f3fb|\U0001f481\U0001f3fc|\U0001f481\U0001f3fd|\U0001f481\U0001f3fe|\U0001f481\U0001f3ff|\U0001f64b\U0001f3fb|\U0001f64b\U0001f3fc|\U0001f64b\U0001f3fd|\U0001f64b\U0001f3fe|\U0001f64b\U0001f3ff|\U0001f9cf\U0001f3fb|\U0001f9cf\U0001f3fc|\U0001f9cf\U0001f3fd|\U0001f9cf\U0001f3fe|\U0001f9cf\U0001f3ff|\U0001f647\U0001f3fb|\U0001f647\U0001f3fc|\U0001f647\U0001f3fd|\U0001f647\U0001f3fe|\U0001f647\U0001f3ff|\U0001f926\U0001f3fb|\U0001f926\U0001f3fc|\U0001f926\U0001f3fd|\U0001f926\U0001f3fe|\U0001f926\U0001f3ff|\U0001f937\U0001f3fb|\U0001f937\U0001f3fc|\U0001f937\U0001f3fd|\U0001f937\U0001f3fe|\U0001f937\U0001f3ff|\U0001f46e\U0001f3fb|\U0001f46e\U00
01f3fc|\U0001f46e\U0001f3fd|\U0001f46e\U0001f3fe|\U0001f46e\U0001f3ff|\U0001f575\ufe0f|\U0001f575\U0001f3fb|\U0001f575\U0001f3fc|\U0001f575\U0001f3fd|\U0001f575\U0001f3fe|\U0001f575\U0001f3ff|\U0001f482\U0001f3fb|\U0001f482\U0001f3fc|\U0001f482\U0001f3fd|\U0001f482\U0001f3fe|\U0001f482\U0001f3ff|\U0001f477\U0001f3fb|\U0001f477\U0001f3fc|\U0001f477\U0001f3fd|\U0001f477\U0001f3fe|\U0001f477\U0001f3ff|\U0001f934\U0001f3fb|\U0001f934\U0001f3fc|\U0001f934\U0001f3fd|\U0001f934\U0001f3fe|\U0001f934\U0001f3ff|\U0001f478\U0001f3fb|\U0001f478\U0001f3fc|\U0001f478\U0001f3fd|\U0001f478\U0001f3fe|\U0001f478\U0001f3ff|\U0001f473\U0001f3fb|\U0001f473\U0001f3fc|\U0001f473\U0001f3fd|\U0001f473\U0001f3fe|\U0001f473\U0001f3ff|\U0001f472\U0001f3fb|\U0001f472\U0001f3fc|\U0001f472\U0001f3fd|\U0001f472\U0001f3fe|\U0001f472\U0001f3ff|\U0001f9d5\U0001f3fb|\U0001f9d5\U0001f3fc|\U0001f9d5\U0001f3fd|\U0001f9d5\U0001f3fe|\U0001f9d5\U0001f3ff|\U0001f935\U0001f3fb|\U0001f935\U0001f3fc|\U0001f935\U0001f3fd|\U0001f935\U0001f3fe|\U0001f935\U0001f3ff|\U0001f470\U0001f3fb|\U0001f470\U0001f3fc|\U0001f470\U0001f3fd|\U0001f470\U0001f3fe|\U0001f470\U0001f3ff|\U0001f930\U0001f3fb|\U0001f930\U0001f3fc|\U0001f930\U0001f3fd|\U0001f930\U0001f3fe|\U0001f930\U0001f3ff|\U0001f931\U0001f3fb|\U0001f931\U0001f3fc|\U0001f931\U0001f3fd|\U0001f931\U0001f3fe|\U0001f931\U0001f3ff|\U0001f47c\U0001f3fb|\U0001f47c\U0001f3fc|\U0001f47c\U0001f3fd|\U0001f47c\U0001f3fe|\U0001f47c\U0001f3ff|\U0001f385\U0001f3fb|\U0001f385\U0001f3fc|\U0001f385\U0001f3fd|\U0001f385\U0001f3fe|\U0001f385\U0001f3ff|\U0001f936\U0001f3fb|\U0001f936\U0001f3fc|\U0001f936\U0001f3fd|\U0001f936\U0001f3fe|\U0001f936\U0001f3ff|\U0001f9b8\U0001f3fb|\U0001f9b8\U0001f3fc|\U0001f9b8\U0001f3fd|\U0001f9b8\U0001f3fe|\U0001f9b8\U0001f3ff|\U0001f9b9\U0001f3fb|\U0001f9b9\U0001f3fc|\U0001f9b9\U0001f3fd|\U0001f9b9\U0001f3fe|\U0001f9b9\U0001f3ff|\U0001f9d9\U0001f3fb|\U0001f9d9\U0001f3fc|\U0001f9d9\U0001f3fd|\U0001f9d9\U0001f3fe|\U0001f9d9\U0001f3ff|\U0001f9da\U0001f3fb|\U0001f9da\U0001f3fc|\U0001f9da\U0001f3fd|\U0001f9da\U0001f3fe|\U0001f9da\U0001f3ff|\U0001f9db\U0001f3fb|\U0001f9db\U0001f3fc|\U0001f9db\U0001f3fd|\U0001f9db\U0001f3fe|\U0001f9db\U0001f3ff|\U0001f9dc\U0001f3fb|\U0001f9dc\U0001f3fc|\U0001f9dc\U0001f3fd|\U0001f9dc\U0001f3fe|\U0001f9dc\U0001f3ff|\U0001f9dd\U0001f3fb|\U0001f9dd\U0001f3fc|\U0001f9dd\U0001f3fd|\U0001f9dd\U0001f3fe|\U0001f9dd\U0001f3ff|\U0001f486\U0001f3fb|\U0001f486\U0001f3fc|\U0001f486\U0001f3fd|\U0001f486\U0001f3fe|\U0001f486\U0001f3ff|\U0001f487\U0001f3fb|\U0001f487\U0001f3fc|\U0001f487\U0001f3fd|\U0001f487\U0001f3fe|\U0001f487\U0001f3ff|\U0001f6b6\U0001f3fb|\U0001f6b6\U0001f3fc|\U0001f6b6\U0001f3fd|\U0001f6b6\U0001f3fe|\U0001f6b6\U0001f3ff|\U0001f9cd\U0001f3fb|\U0001f9cd\U0001f3fc|\U0001f9cd\U0001f3fd|\U0001f9cd\U0001f3fe|\U0001f9cd\U0001f3ff|\U0001f9ce\U0001f3fb|\U0001f9ce\U0001f3fc|\U0001f9ce\U0001f3fd|\U0001f9ce\U0001f3fe|\U0001f9ce\U0001f3ff|\U0001f3c3\U0001f3fb|\U0001f3c3\U0001f3fc|\U0001f3c3\U0001f3fd|\U0001f3c3\U0001f3fe|\U0001f3c3\U0001f3ff|\U0001f483\U0001f3fb|\U0001f483\U0001f3fc|\U0001f483\U0001f3fd|\U0001f483\U0001f3fe|\U0001f483\U0001f3ff|\U0001f57a\U0001f3fb|\U0001f57a\U0001f3fc|\U0001f57a\U0001f3fd|\U0001f57a\U0001f3fe|\U0001f57a\U0001f3ff|\U0001f574\ufe0f|\U0001f574\U0001f3fb|\U0001f574\U0001f3fc|\U0001f574\U0001f3fd|\U0001f574\U0001f3fe|\U0001f574\U0001f3ff|\U0001f9d6\U0001f3fb|\U0001f9d6\U0001f3fc|\U0001f9d6\U0001f3fd|\U0001f9d6\U0001f3fe|\U0001f9d6\U0001f3ff|\U0001f9d7\U0001f3fb|\U0001f9d7\U0001f3fc|\U0001f9d7\U0001f3fd|\U0001f9d7\U0001f3fe|\U0001f
9d7\U0001f3ff|\U0001f3c7\U0001f3fb|\U0001f3c7\U0001f3fc|\U0001f3c7\U0001f3fd|\U0001f3c7\U0001f3fe|\U0001f3c7\U0001f3ff|\u26f7\ufe0f|\U0001f3c2\U0001f3fb|\U0001f3c2\U0001f3fc|\U0001f3c2\U0001f3fd|\U0001f3c2\U0001f3fe|\U0001f3c2\U0001f3ff|\U0001f3cc\ufe0f|\U0001f3cc\U0001f3fb|\U0001f3cc\U0001f3fc|\U0001f3cc\U0001f3fd|\U0001f3cc\U0001f3fe|\U0001f3cc\U0001f3ff|\U0001f3c4\U0001f3fb|\U0001f3c4\U0001f3fc|\U0001f3c4\U0001f3fd|\U0001f3c4\U0001f3fe|\U0001f3c4\U0001f3ff|\U0001f6a3\U0001f3fb|\U0001f6a3\U0001f3fc|\U0001f6a3\U0001f3fd|\U0001f6a3\U0001f3fe|\U0001f6a3\U0001f3ff|\U0001f3ca\U0001f3fb|\U0001f3ca\U0001f3fc|\U0001f3ca\U0001f3fd|\U0001f3ca\U0001f3fe|\U0001f3ca\U0001f3ff|\u26f9\ufe0f|\u26f9\U0001f3fb|\u26f9\U0001f3fc|\u26f9\U0001f3fd|\u26f9\U0001f3fe|\u26f9\U0001f3ff|\U0001f3cb\ufe0f|\U0001f3cb\U0001f3fb|\U0001f3cb\U0001f3fc|\U0001f3cb\U0001f3fd|\U0001f3cb\U0001f3fe|\U0001f3cb\U0001f3ff|\U0001f6b4\U0001f3fb|\U0001f6b4\U0001f3fc|\U0001f6b4\U0001f3fd|\U0001f6b4\U0001f3fe|\U0001f6b4\U0001f3ff|\U0001f6b5\U0001f3fb|\U0001f6b5\U0001f3fc|\U0001f6b5\U0001f3fd|\U0001f6b5\U0001f3fe|\U0001f6b5\U0001f3ff|\U0001f938\U0001f3fb|\U0001f938\U0001f3fc|\U0001f938\U0001f3fd|\U0001f938\U0001f3fe|\U0001f938\U0001f3ff|\U0001f93d\U0001f3fb|\U0001f93d\U0001f3fc|\U0001f93d\U0001f3fd|\U0001f93d\U0001f3fe|\U0001f93d\U0001f3ff|\U0001f93e\U0001f3fb|\U0001f93e\U0001f3fc|\U0001f93e\U0001f3fd|\U0001f93e\U0001f3fe|\U0001f93e\U0001f3ff|\U0001f939\U0001f3fb|\U0001f939\U0001f3fc|\U0001f939\U0001f3fd|\U0001f939\U0001f3fe|\U0001f939\U0001f3ff|\U0001f9d8\U0001f3fb|\U0001f9d8\U0001f3fc|\U0001f9d8\U0001f3fd|\U0001f9d8\U0001f3fe|\U0001f9d8\U0001f3ff|\U0001f6c0\U0001f3fb|\U0001f6c0\U0001f3fc|\U0001f6c0\U0001f3fd|\U0001f6c0\U0001f3fe|\U0001f6c0\U0001f3ff|\U0001f6cc\U0001f3fb|\U0001f6cc\U0001f3fc|\U0001f6cc\U0001f3fd|\U0001f6cc\U0001f3fe|\U0001f6cc\U0001f3ff|\U0001f46d\U0001f3fb|\U0001f46d\U0001f3fc|\U0001f46d\U0001f3fd|\U0001f46d\U0001f3fe|\U0001f46d\U0001f3ff|\U0001f46b\U0001f3fb|\U0001f46b\U0001f3fc|\U0001f46b\U0001f3fd|\U0001f46b\U0001f3fe|\U0001f46b\U0001f3ff|\U0001f46c\U0001f3fb|\U0001f46c\U0001f3fc|\U0001f46c\U0001f3fd|\U0001f46c\U0001f3fe|\U0001f46c\U0001f3ff|\U0001f5e3\ufe0f|\U0001f43f\ufe0f|\U0001f54a\ufe0f|\U0001f577\ufe0f|\U0001f578\ufe0f|\U0001f3f5\ufe0f|\u2618\ufe0f|\U0001f336\ufe0f|\U0001f37d\ufe0f|\U0001f5fa\ufe0f|\U0001f3d4\ufe0f|\u26f0\ufe0f|\U0001f3d5\ufe0f|\U0001f3d6\ufe0f|\U0001f3dc\ufe0f|\U0001f3dd\ufe0f|\U0001f3de\ufe0f|\U0001f3df\ufe0f|\U0001f3db\ufe0f|\U0001f3d7\ufe0f|\U0001f3d8\ufe0f|\U0001f3da\ufe0f|\u26e9\ufe0f|\U0001f3d9\ufe0f|\u2668\ufe0f|\U0001f3ce\ufe0f|\U0001f3cd\ufe0f|\U0001f6e3\ufe0f|\U0001f6e4\ufe0f|\U0001f6e2\ufe0f|\U0001f6f3\ufe0f|\u26f4\ufe0f|\U0001f6e5\ufe0f|\u2708\ufe0f|\U0001f6e9\ufe0f|\U0001f6f0\ufe0f|\U0001f6ce\ufe0f|\u23f1\ufe0f|\u23f2\ufe0f|\U0001f570\ufe0f|\U0001f321\ufe0f|\u2600\ufe0f|\u2601\ufe0f|\u26c8\ufe0f|\U0001f324\ufe0f|\U0001f325\ufe0f|\U0001f326\ufe0f|\U0001f327\ufe0f|\U0001f328\ufe0f|\U0001f329\ufe0f|\U0001f32a\ufe0f|\U0001f32b\ufe0f|\U0001f32c\ufe0f|\u2602\ufe0f|\u26f1\ufe0f|\u2744\ufe0f|\u2603\ufe0f|\u2604\ufe0f|\U0001f397\ufe0f|\U0001f39f\ufe0f|\U0001f396\ufe0f|\u26f8\ufe0f|\U0001f579\ufe0f|\u2660\ufe0f|\u2665\ufe0f|\u2666\ufe0f|\u2663\ufe0f|\u265f\ufe0f|\U0001f5bc\ufe0f|\U0001f576\ufe0f|\U0001f6cd\ufe0f|\u26d1\ufe0f|\U0001f399\ufe0f|\U0001f39a\ufe0f|\U0001f39b\ufe0f|\u260e\ufe0f|\U0001f5a5\ufe0f|\U0001f5a8\ufe0f|\u2328\ufe0f|\U0001f5b1\ufe0f|\U0001f5b2\ufe0f|\U0001f39e\ufe0f|\U0001f4fd\ufe0f|\U0001f56f\ufe0f|\U0001f5de\ufe0f|\U0001f3f7\ufe0f|\u2709\ufe0f|\U0001f5f3\ufe0f|\u270f\uf
e0f|\u2712\ufe0f|\U0001f58b\ufe0f|\U0001f58a\ufe0f|\U0001f58c\ufe0f|\U0001f58d\ufe0f|\U0001f5c2\ufe0f|\U0001f5d2\ufe0f|\U0001f5d3\ufe0f|\U0001f587\ufe0f|\u2702\ufe0f|\U0001f5c3\ufe0f|\U0001f5c4\ufe0f|\U0001f5d1\ufe0f|\U0001f5dd\ufe0f|\u26cf\ufe0f|\u2692\ufe0f|\U0001f6e0\ufe0f|\U0001f5e1\ufe0f|\u2694\ufe0f|\U0001f6e1\ufe0f|\u2699\ufe0f|\U0001f5dc\ufe0f|\u2696\ufe0f|\u26d3\ufe0f|\u2697\ufe0f|\U0001f6cf\ufe0f|\U0001f6cb\ufe0f|\u26b0\ufe0f|\u26b1\ufe0f|\u26a0\ufe0f|\u2622\ufe0f|\u2623\ufe0f|\u2b06\ufe0f|\u2197\ufe0f|\u27a1\ufe0f|\u2198\ufe0f|\u2b07\ufe0f|\u2199\ufe0f|\u2b05\ufe0f|\u2196\ufe0f|\u2195\ufe0f|\u2194\ufe0f|\u21a9\ufe0f|\u21aa\ufe0f|\u2934\ufe0f|\u2935\ufe0f|\u269b\ufe0f|\U0001f549\ufe0f|\u2721\ufe0f|\u2638\ufe0f|\u262f\ufe0f|\u271d\ufe0f|\u2626\ufe0f|\u262a\ufe0f|\u262e\ufe0f|\u25b6\ufe0f|\u23ed\ufe0f|\u23ef\ufe0f|\u25c0\ufe0f|\u23ee\ufe0f|\u23f8\ufe0f|\u23f9\ufe0f|\u23fa\ufe0f|\u23cf\ufe0f|\u2640\ufe0f|\u2642\ufe0f|\u2695\ufe0f|\u267e\ufe0f|\u267b\ufe0f|\u269c\ufe0f|\u2611\ufe0f|\u2714\ufe0f|\u2716\ufe0f|\u303d\ufe0f|\u2733\ufe0f|\u2734\ufe0f|\u2747\ufe0f|\u203c\ufe0f|\u2049\ufe0f|\u3030\ufe0f|\xa9\ufe0f|\xae\ufe0f|\u2122\ufe0f|\\#\u20e3|\\*\u20e3|0\u20e3|1\u20e3|2\u20e3|3\u20e3|4\u20e3|5\u20e3|6\u20e3|7\u20e3|8\u20e3|9\u20e3|\U0001f170\ufe0f|\U0001f171\ufe0f|\u2139\ufe0f|\u24c2\ufe0f|\U0001f17e\ufe0f|\U0001f17f\ufe0f|\U0001f202\ufe0f|\U0001f237\ufe0f|\u3297\ufe0f|\u3299\ufe0f|\u25fc\ufe0f|\u25fb\ufe0f|\u25aa\ufe0f|\u25ab\ufe0f|\U0001f3f3\ufe0f|\U0001f1e6\U0001f1e8|\U0001f1e6\U0001f1e9|\U0001f1e6\U0001f1ea|\U0001f1e6\U0001f1eb|\U0001f1e6\U0001f1ec|\U0001f1e6\U0001f1ee|\U0001f1e6\U0001f1f1|\U0001f1e6\U0001f1f2|\U0001f1e6\U0001f1f4|\U0001f1e6\U0001f1f6|\U0001f1e6\U0001f1f7|\U0001f1e6\U0001f1f8|\U0001f1e6\U0001f1f9|\U0001f1e6\U0001f1fa|\U0001f1e6\U0001f1fc|\U0001f1e6\U0001f1fd|\U0001f1e6\U0001f1ff|\U0001f1e7\U0001f1e6|\U0001f1e7\U0001f1e7|\U0001f1e7\U0001f1e9|\U0001f1e7\U0001f1ea|\U0001f1e7\U0001f1eb|\U0001f1e7\U0001f1ec|\U0001f1e7\U0001f1ed|\U0001f1e7\U0001f1ee|\U0001f1e7\U0001f1ef|\U0001f1e7\U0001f1f1|\U0001f1e7\U0001f1f2|\U0001f1e7\U0001f1f3|\U0001f1e7\U0001f1f4|\U0001f1e7\U0001f1f6|\U0001f1e7\U0001f1f7|\U0001f1e7\U0001f1f8|\U0001f1e7\U0001f1f9|\U0001f1e7\U0001f1fb|\U0001f1e7\U0001f1fc|\U0001f1e7\U0001f1fe|\U0001f1e7\U0001f1ff|\U0001f1e8\U0001f1e6|\U0001f1e8\U0001f1e8|\U0001f1e8\U0001f1e9|\U0001f1e8\U0001f1eb|\U0001f1e8\U0001f1ec|\U0001f1e8\U0001f1ed|\U0001f1e8\U0001f1ee|\U0001f1e8\U0001f1f0|\U0001f1e8\U0001f1f1|\U0001f1e8\U0001f1f2|\U0001f1e8\U0001f1f3|\U0001f1e8\U0001f1f4|\U0001f1e8\U0001f1f5|\U0001f1e8\U0001f1f7|\U0001f1e8\U0001f1fa|\U0001f1e8\U0001f1fb|\U0001f1e8\U0001f1fc|\U0001f1e8\U0001f1fd|\U0001f1e8\U0001f1fe|\U0001f1e8\U0001f1ff|\U0001f1e9\U0001f1ea|\U0001f1e9\U0001f1ec|\U0001f1e9\U0001f1ef|\U0001f1e9\U0001f1f0|\U0001f1e9\U0001f1f2|\U0001f1e9\U0001f1f4|\U0001f1e9\U0001f1ff|\U0001f1ea\U0001f1e6|\U0001f1ea\U0001f1e8|\U0001f1ea\U0001f1ea|\U0001f1ea\U0001f1ec|\U0001f1ea\U0001f1ed|\U0001f1ea\U0001f1f7|\U0001f1ea\U0001f1f8|\U0001f1ea\U0001f1f9|\U0001f1ea\U0001f1fa|\U0001f1eb\U0001f1ee|\U0001f1eb\U0001f1ef|\U0001f1eb\U0001f1f0|\U0001f1eb\U0001f1f2|\U0001f1eb\U0001f1f4|\U0001f1eb\U0001f1f7|\U0001f1ec\U0001f1e6|\U0001f1ec\U0001f1e7|\U0001f1ec\U0001f1e9|\U0001f1ec\U0001f1ea|\U0001f1ec\U0001f1eb|\U0001f1ec\U0001f1ec|\U0001f1ec\U0001f1ed|\U0001f1ec\U0001f1ee|\U0001f1ec\U0001f1f1|\U0001f1ec\U0001f1f2|\U0001f1ec\U0001f1f3|\U0001f1ec\U0001f1f5|\U0001f1ec\U0001f1f6|\U0001f1ec\U0001f1f7|\U0001f1ec\U0001f1f8|\U0001f1ec\U0001f1f9|\U0001f1ec\U0001f1fa|\U0001f1ec\U0001f1fc|\U0001f1ec\U0001f1f
e|\U0001f1ed\U0001f1f0|\U0001f1ed\U0001f1f2|\U0001f1ed\U0001f1f3|\U0001f1ed\U0001f1f7|\U0001f1ed\U0001f1f9|\U0001f1ed\U0001f1fa|\U0001f1ee\U0001f1e8|\U0001f1ee\U0001f1e9|\U0001f1ee\U0001f1ea|\U0001f1ee\U0001f1f1|\U0001f1ee\U0001f1f2|\U0001f1ee\U0001f1f3|\U0001f1ee\U0001f1f4|\U0001f1ee\U0001f1f6|\U0001f1ee\U0001f1f7|\U0001f1ee\U0001f1f8|\U0001f1ee\U0001f1f9|\U0001f1ef\U0001f1ea|\U0001f1ef\U0001f1f2|\U0001f1ef\U0001f1f4|\U0001f1ef\U0001f1f5|\U0001f1f0\U0001f1ea|\U0001f1f0\U0001f1ec|\U0001f1f0\U0001f1ed|\U0001f1f0\U0001f1ee|\U0001f1f0\U0001f1f2|\U0001f1f0\U0001f1f3|\U0001f1f0\U0001f1f5|\U0001f1f0\U0001f1f7|\U0001f1f0\U0001f1fc|\U0001f1f0\U0001f1fe|\U0001f1f0\U0001f1ff|\U0001f1f1\U0001f1e6|\U0001f1f1\U0001f1e7|\U0001f1f1\U0001f1e8|\U0001f1f1\U0001f1ee|\U0001f1f1\U0001f1f0|\U0001f1f1\U0001f1f7|\U0001f1f1\U0001f1f8|\U0001f1f1\U0001f1f9|\U0001f1f1\U0001f1fa|\U0001f1f1\U0001f1fb|\U0001f1f1\U0001f1fe|\U0001f1f2\U0001f1e6|\U0001f1f2\U0001f1e8|\U0001f1f2\U0001f1e9|\U0001f1f2\U0001f1ea|\U0001f1f2\U0001f1eb|\U0001f1f2\U0001f1ec|\U0001f1f2\U0001f1ed|\U0001f1f2\U0001f1f0|\U0001f1f2\U0001f1f1|\U0001f1f2\U0001f1f2|\U0001f1f2\U0001f1f3|\U0001f1f2\U0001f1f4|\U0001f1f2\U0001f1f5|\U0001f1f2\U0001f1f6|\U0001f1f2\U0001f1f7|\U0001f1f2\U0001f1f8|\U0001f1f2\U0001f1f9|\U0001f1f2\U0001f1fa|\U0001f1f2\U0001f1fb|\U0001f1f2\U0001f1fc|\U0001f1f2\U0001f1fd|\U0001f1f2\U0001f1fe|\U0001f1f2\U0001f1ff|\U0001f1f3\U0001f1e6|\U0001f1f3\U0001f1e8|\U0001f1f3\U0001f1ea|\U0001f1f3\U0001f1eb|\U0001f1f3\U0001f1ec|\U0001f1f3\U0001f1ee|\U0001f1f3\U0001f1f1|\U0001f1f3\U0001f1f4|\U0001f1f3\U0001f1f5|\U0001f1f3\U0001f1f7|\U0001f1f3\U0001f1fa|\U0001f1f3\U0001f1ff|\U0001f1f4\U0001f1f2|\U0001f1f5\U0001f1e6|\U0001f1f5\U0001f1ea|\U0001f1f5\U0001f1eb|\U0001f1f5\U0001f1ec|\U0001f1f5\U0001f1ed|\U0001f1f5\U0001f1f0|\U0001f1f5\U0001f1f1|\U0001f1f5\U0001f1f2|\U0001f1f5\U0001f1f3|\U0001f1f5\U0001f1f7|\U0001f1f5\U0001f1f8|\U0001f1f5\U0001f1f9|\U0001f1f5\U0001f1fc|\U0001f1f5\U0001f1fe|\U0001f1f6\U0001f1e6|\U0001f1f7\U0001f1ea|\U0001f1f7\U0001f1f4|\U0001f1f7\U0001f1f8|\U0001f1f7\U0001f1fa|\U0001f1f7\U0001f1fc|\U0001f1f8\U0001f1e6|\U0001f1f8\U0001f1e7|\U0001f1f8\U0001f1e8|\U0001f1f8\U0001f1e9|\U0001f1f8\U0001f1ea|\U0001f1f8\U0001f1ec|\U0001f1f8\U0001f1ed|\U0001f1f8\U0001f1ee|\U0001f1f8\U0001f1ef|\U0001f1f8\U0001f1f0|\U0001f1f8\U0001f1f1|\U0001f1f8\U0001f1f2|\U0001f1f8\U0001f1f3|\U0001f1f8\U0001f1f4|\U0001f1f8\U0001f1f7|\U0001f1f8\U0001f1f8|\U0001f1f8\U0001f1f9|\U0001f1f8\U0001f1fb|\U0001f1f8\U0001f1fd|\U0001f1f8\U0001f1fe|\U0001f1f8\U0001f1ff|\U0001f1f9\U0001f1e6|\U0001f1f9\U0001f1e8|\U0001f1f9\U0001f1e9|\U0001f1f9\U0001f1eb|\U0001f1f9\U0001f1ec|\U0001f1f9\U0001f1ed|\U0001f1f9\U0001f1ef|\U0001f1f9\U0001f1f0|\U0001f1f9\U0001f1f1|\U0001f1f9\U0001f1f2|\U0001f1f9\U0001f1f3|\U0001f1f9\U0001f1f4|\U0001f1f9\U0001f1f7|\U0001f1f9\U0001f1f9|\U0001f1f9\U0001f1fb|\U0001f1f9\U0001f1fc|\U0001f1f9\U0001f1ff|\U0001f1fa\U0001f1e6|\U0001f1fa\U0001f1ec|\U0001f1fa\U0001f1f2|\U0001f1fa\U0001f1f3|\U0001f1fa\U0001f1f8|\U0001f1fa\U0001f1fe|\U0001f1fa\U0001f1ff|\U0001f1fb\U0001f1e6|\U0001f1fb\U0001f1e8|\U0001f1fb\U0001f1ea|\U0001f1fb\U0001f1ec|\U0001f1fb\U0001f1ee|\U0001f1fb\U0001f1f3|\U0001f1fb\U0001f1fa|\U0001f1fc\U0001f1eb|\U0001f1fc\U0001f1f8|\U0001f1fd\U0001f1f0|\U0001f1fe\U0001f1ea|\U0001f1fe\U0001f1f9|\U0001f1ff\U0001f1e6|\U0001f1ff\U0001f1f2|\U0001f1ff\U0001f1fc|\U0001f600|\U0001f603|\U0001f604|\U0001f601|\U0001f606|\U0001f605|\U0001f923|\U0001f602|\U0001f642|\U0001f643|\U0001f609|\U0001f60a|\U0001f607|\U0001f970|\U0001f60d|\U0001f929|\U0001f618|\U0001f617|\u263a|\U0001f61
a|\U0001f619|\U0001f60b|\U0001f61b|\U0001f61c|\U0001f92a|\U0001f61d|\U0001f911|\U0001f917|\U0001f92d|\U0001f92b|\U0001f914|\U0001f910|\U0001f928|\U0001f610|\U0001f611|\U0001f636|\U0001f60f|\U0001f612|\U0001f644|\U0001f62c|\U0001f925|\U0001f60c|\U0001f614|\U0001f62a|\U0001f924|\U0001f634|\U0001f637|\U0001f912|\U0001f915|\U0001f922|\U0001f92e|\U0001f927|\U0001f975|\U0001f976|\U0001f974|\U0001f635|\U0001f92f|\U0001f920|\U0001f973|\U0001f60e|\U0001f913|\U0001f9d0|\U0001f615|\U0001f61f|\U0001f641|\u2639|\U0001f62e|\U0001f62f|\U0001f632|\U0001f633|\U0001f97a|\U0001f626|\U0001f627|\U0001f628|\U0001f630|\U0001f625|\U0001f622|\U0001f62d|\U0001f631|\U0001f616|\U0001f623|\U0001f61e|\U0001f613|\U0001f629|\U0001f62b|\U0001f971|\U0001f624|\U0001f621|\U0001f620|\U0001f92c|\U0001f608|\U0001f47f|\U0001f480|\u2620|\U0001f4a9|\U0001f921|\U0001f479|\U0001f47a|\U0001f47b|\U0001f47d|\U0001f47e|\U0001f916|\U0001f63a|\U0001f638|\U0001f639|\U0001f63b|\U0001f63c|\U0001f63d|\U0001f640|\U0001f63f|\U0001f63e|\U0001f648|\U0001f649|\U0001f64a|\U0001f48b|\U0001f48c|\U0001f498|\U0001f49d|\U0001f496|\U0001f497|\U0001f493|\U0001f49e|\U0001f495|\U0001f49f|\u2763|\U0001f494|\u2764|\U0001f9e1|\U0001f49b|\U0001f49a|\U0001f499|\U0001f49c|\U0001f90e|\U0001f5a4|\U0001f90d|\U0001f4af|\U0001f4a2|\U0001f4a5|\U0001f4ab|\U0001f4a6|\U0001f4a8|\U0001f573|\U0001f4a3|\U0001f4ac|\U0001f5e8|\U0001f5ef|\U0001f4ad|\U0001f4a4|\U0001f44b|\U0001f91a|\U0001f590|\u270b|\U0001f596|\U0001f44c|\U0001f90f|\u270c|\U0001f91e|\U0001f91f|\U0001f918|\U0001f919|\U0001f448|\U0001f449|\U0001f446|\U0001f595|\U0001f447|\u261d|\U0001f44d|\U0001f44e|\u270a|\U0001f44a|\U0001f91b|\U0001f91c|\U0001f44f|\U0001f64c|\U0001f450|\U0001f932|\U0001f91d|\U0001f64f|\u270d|\U0001f485|\U0001f933|\U0001f4aa|\U0001f9be|\U0001f9bf|\U0001f9b5|\U0001f9b6|\U0001f442|\U0001f9bb|\U0001f443|\U0001f9e0|\U0001f9b7|\U0001f9b4|\U0001f440|\U0001f441|\U0001f445|\U0001f444|\U0001f476|\U0001f9d2|\U0001f466|\U0001f467|\U0001f9d1|\U0001f471|\U0001f468|\U0001f9d4|\U0001f469|\U0001f9d3|\U0001f474|\U0001f475|\U0001f64d|\U0001f64e|\U0001f645|\U0001f646|\U0001f481|\U0001f64b|\U0001f9cf|\U0001f647|\U0001f926|\U0001f937|\U0001f46e|\U0001f575|\U0001f482|\U0001f477|\U0001f934|\U0001f478|\U0001f473|\U0001f472|\U0001f9d5|\U0001f935|\U0001f470|\U0001f930|\U0001f931|\U0001f47c|\U0001f385|\U0001f936|\U0001f9b8|\U0001f9b9|\U0001f9d9|\U0001f9da|\U0001f9db|\U0001f9dc|\U0001f9dd|\U0001f9de|\U0001f9df|\U0001f486|\U0001f487|\U0001f6b6|\U0001f9cd|\U0001f9ce|\U0001f3c3|\U0001f483|\U0001f57a|\U0001f574|\U0001f46f|\U0001f9d6|\U0001f9d7|\U0001f93a|\U0001f3c7|\u26f7|\U0001f3c2|\U0001f3cc|\U0001f3c4|\U0001f6a3|\U0001f3ca|\u26f9|\U0001f3cb|\U0001f6b4|\U0001f6b5|\U0001f938|\U0001f93c|\U0001f93d|\U0001f93e|\U0001f939|\U0001f9d8|\U0001f6c0|\U0001f6cc|\U0001f46d|\U0001f46b|\U0001f46c|\U0001f48f|\U0001f491|\U0001f46a|\U0001f5e3|\U0001f464|\U0001f465|\U0001f463|\U0001f3fb|\U0001f3fc|\U0001f3fd|\U0001f3fe|\U0001f3ff|\U0001f9b0|\U0001f9b1|\U0001f9b3|\U0001f9b2|\U0001f435|\U0001f412|\U0001f98d|\U0001f9a7|\U0001f436|\U0001f415|\U0001f9ae|\U0001f429|\U0001f43a|\U0001f98a|\U0001f99d|\U0001f431|\U0001f408|\U0001f981|\U0001f42f|\U0001f405|\U0001f406|\U0001f434|\U0001f40e|\U0001f984|\U0001f993|\U0001f98c|\U0001f42e|\U0001f402|\U0001f403|\U0001f404|\U0001f437|\U0001f416|\U0001f417|\U0001f43d|\U0001f40f|\U0001f411|\U0001f410|\U0001f42a|\U0001f42b|\U0001f999|\U0001f992|\U0001f418|\U0001f98f|\U0001f99b|\U0001f42d|\U0001f401|\U0001f400|\U0001f439|\U0001f430|\U0001f407|\U0001f43f|\U0001f994|\U0001f987|\U0001f43b|\U0001f428|\U0001f43c|\U0001f9a5|
\U0001f9a6|\U0001f9a8|\U0001f998|\U0001f9a1|\U0001f43e|\U0001f983|\U0001f414|\U0001f413|\U0001f423|\U0001f424|\U0001f425|\U0001f426|\U0001f427|\U0001f54a|\U0001f985|\U0001f986|\U0001f9a2|\U0001f989|\U0001f9a9|\U0001f99a|\U0001f99c|\U0001f438|\U0001f40a|\U0001f422|\U0001f98e|\U0001f40d|\U0001f432|\U0001f409|\U0001f995|\U0001f996|\U0001f433|\U0001f40b|\U0001f42c|\U0001f41f|\U0001f420|\U0001f421|\U0001f988|\U0001f419|\U0001f41a|\U0001f40c|\U0001f98b|\U0001f41b|\U0001f41c|\U0001f41d|\U0001f41e|\U0001f997|\U0001f577|\U0001f578|\U0001f982|\U0001f99f|\U0001f9a0|\U0001f490|\U0001f338|\U0001f4ae|\U0001f3f5|\U0001f339|\U0001f940|\U0001f33a|\U0001f33b|\U0001f33c|\U0001f337|\U0001f331|\U0001f332|\U0001f333|\U0001f334|\U0001f335|\U0001f33e|\U0001f33f|\u2618|\U0001f340|\U0001f341|\U0001f342|\U0001f343|\U0001f347|\U0001f348|\U0001f349|\U0001f34a|\U0001f34b|\U0001f34c|\U0001f34d|\U0001f96d|\U0001f34e|\U0001f34f|\U0001f350|\U0001f351|\U0001f352|\U0001f353|\U0001f95d|\U0001f345|\U0001f965|\U0001f951|\U0001f346|\U0001f954|\U0001f955|\U0001f33d|\U0001f336|\U0001f952|\U0001f96c|\U0001f966|\U0001f9c4|\U0001f9c5|\U0001f344|\U0001f95c|\U0001f330|\U0001f35e|\U0001f950|\U0001f956|\U0001f968|\U0001f96f|\U0001f95e|\U0001f9c7|\U0001f9c0|\U0001f356|\U0001f357|\U0001f969|\U0001f953|\U0001f354|\U0001f35f|\U0001f355|\U0001f32d|\U0001f96a|\U0001f32e|\U0001f32f|\U0001f959|\U0001f9c6|\U0001f95a|\U0001f373|\U0001f958|\U0001f372|\U0001f963|\U0001f957|\U0001f37f|\U0001f9c8|\U0001f9c2|\U0001f96b|\U0001f371|\U0001f358|\U0001f359|\U0001f35a|\U0001f35b|\U0001f35c|\U0001f35d|\U0001f360|\U0001f362|\U0001f363|\U0001f364|\U0001f365|\U0001f96e|\U0001f361|\U0001f95f|\U0001f960|\U0001f961|\U0001f980|\U0001f99e|\U0001f990|\U0001f991|\U0001f9aa|\U0001f366|\U0001f367|\U0001f368|\U0001f369|\U0001f36a|\U0001f382|\U0001f370|\U0001f9c1|\U0001f967|\U0001f36b|\U0001f36c|\U0001f36d|\U0001f36e|\U0001f36f|\U0001f37c|\U0001f95b|\u2615|\U0001f375|\U0001f376|\U0001f37e|\U0001f377|\U0001f378|\U0001f379|\U0001f37a|\U0001f37b|\U0001f942|\U0001f943|\U0001f964|\U0001f9c3|\U0001f9c9|\U0001f9ca|\U0001f962|\U0001f37d|\U0001f374|\U0001f944|\U0001f52a|\U0001f3fa|\U0001f30d|\U0001f30e|\U0001f30f|\U0001f310|\U0001f5fa|\U0001f5fe|\U0001f9ed|\U0001f3d4|\u26f0|\U0001f30b|\U0001f5fb|\U0001f3d5|\U0001f3d6|\U0001f3dc|\U0001f3dd|\U0001f3de|\U0001f3df|\U0001f3db|\U0001f3d7|\U0001f9f1|\U0001f3d8|\U0001f3da|\U0001f3e0|\U0001f3e1|\U0001f3e2|\U0001f3e3|\U0001f3e4|\U0001f3e5|\U0001f3e6|\U0001f3e8|\U0001f3e9|\U0001f3ea|\U0001f3eb|\U0001f3ec|\U0001f3ed|\U0001f3ef|\U0001f3f0|\U0001f492|\U0001f5fc|\U0001f5fd|\u26ea|\U0001f54c|\U0001f6d5|\U0001f54d|\u26e9|\U0001f54b|\u26f2|\u26fa|\U0001f301|\U0001f303|\U0001f3d9|\U0001f304|\U0001f305|\U0001f306|\U0001f307|\U0001f309|\u2668|\U0001f3a0|\U0001f3a1|\U0001f3a2|\U0001f488|\U0001f3aa|\U0001f682|\U0001f683|\U0001f684|\U0001f685|\U0001f686|\U0001f687|\U0001f688|\U0001f689|\U0001f68a|\U0001f69d|\U0001f69e|\U0001f68b|\U0001f68c|\U0001f68d|\U0001f68e|\U0001f690|\U0001f691|\U0001f692|\U0001f693|\U0001f694|\U0001f695|\U0001f696|\U0001f697|\U0001f698|\U0001f699|\U0001f69a|\U0001f69b|\U0001f69c|\U0001f3ce|\U0001f3cd|\U0001f6f5|\U0001f9bd|\U0001f9bc|\U0001f6fa|\U0001f6b2|\U0001f6f4|\U0001f6f9|\U0001f68f|\U0001f6e3|\U0001f6e4|\U0001f6e2|\u26fd|\U0001f6a8|\U0001f6a5|\U0001f6a6|\U0001f6d1|\U0001f6a7|\u2693|\u26f5|\U0001f6f6|\U0001f6a4|\U0001f6f3|\u26f4|\U0001f6e5|\U0001f6a2|\u2708|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001fa82|\U0001f4ba|\U0001f681|\U0001f69f|\U0001f6a0|\U0001f6a1|\U0001f6f0|\U0001f680|\U0001f6f8|\U0001f6ce|\U0001f9f3|\u231b|\u23f3|\u231a|
\u23f0|\u23f1|\u23f2|\U0001f570|\U0001f55b|\U0001f567|\U0001f550|\U0001f55c|\U0001f551|\U0001f55d|\U0001f552|\U0001f55e|\U0001f553|\U0001f55f|\U0001f554|\U0001f560|\U0001f555|\U0001f561|\U0001f556|\U0001f562|\U0001f557|\U0001f563|\U0001f558|\U0001f564|\U0001f559|\U0001f565|\U0001f55a|\U0001f566|\U0001f311|\U0001f312|\U0001f313|\U0001f314|\U0001f315|\U0001f316|\U0001f317|\U0001f318|\U0001f319|\U0001f31a|\U0001f31b|\U0001f31c|\U0001f321|\u2600|\U0001f31d|\U0001f31e|\U0001fa90|\u2b50|\U0001f31f|\U0001f320|\U0001f30c|\u2601|\u26c5|\u26c8|\U0001f324|\U0001f325|\U0001f326|\U0001f327|\U0001f328|\U0001f329|\U0001f32a|\U0001f32b|\U0001f32c|\U0001f300|\U0001f308|\U0001f302|\u2602|\u2614|\u26f1|\u26a1|\u2744|\u2603|\u26c4|\u2604|\U0001f525|\U0001f4a7|\U0001f30a|\U0001f383|\U0001f384|\U0001f386|\U0001f387|\U0001f9e8|\u2728|\U0001f388|\U0001f389|\U0001f38a|\U0001f38b|\U0001f38d|\U0001f38e|\U0001f38f|\U0001f390|\U0001f391|\U0001f9e7|\U0001f380|\U0001f381|\U0001f397|\U0001f39f|\U0001f3ab|\U0001f396|\U0001f3c6|\U0001f3c5|\U0001f947|\U0001f948|\U0001f949|\u26bd|\u26be|\U0001f94e|\U0001f3c0|\U0001f3d0|\U0001f3c8|\U0001f3c9|\U0001f3be|\U0001f94f|\U0001f3b3|\U0001f3cf|\U0001f3d1|\U0001f3d2|\U0001f94d|\U0001f3d3|\U0001f3f8|\U0001f94a|\U0001f94b|\U0001f945|\u26f3|\u26f8|\U0001f3a3|\U0001f93f|\U0001f3bd|\U0001f3bf|\U0001f6f7|\U0001f94c|\U0001f3af|\U0001fa80|\U0001fa81|\U0001f3b1|\U0001f52e|\U0001f9ff|\U0001f3ae|\U0001f579|\U0001f3b0|\U0001f3b2|\U0001f9e9|\U0001f9f8|\u2660|\u2665|\u2666|\u2663|\u265f|\U0001f0cf|\U0001f004|\U0001f3b4|\U0001f3ad|\U0001f5bc|\U0001f3a8|\U0001f9f5|\U0001f9f6|\U0001f453|\U0001f576|\U0001f97d|\U0001f97c|\U0001f9ba|\U0001f454|\U0001f455|\U0001f456|\U0001f9e3|\U0001f9e4|\U0001f9e5|\U0001f9e6|\U0001f457|\U0001f458|\U0001f97b|\U0001fa71|\U0001fa72|\U0001fa73|\U0001f459|\U0001f45a|\U0001f45b|\U0001f45c|\U0001f45d|\U0001f6cd|\U0001f392|\U0001f45e|\U0001f45f|\U0001f97e|\U0001f97f|\U0001f460|\U0001f461|\U0001fa70|\U0001f462|\U0001f451|\U0001f452|\U0001f3a9|\U0001f393|\U0001f9e2|\u26d1|\U0001f4ff|\U0001f484|\U0001f48d|\U0001f48e|\U0001f507|\U0001f508|\U0001f509|\U0001f50a|\U0001f4e2|\U0001f4e3|\U0001f4ef|\U0001f514|\U0001f515|\U0001f3bc|\U0001f3b5|\U0001f3b6|\U0001f399|\U0001f39a|\U0001f39b|\U0001f3a4|\U0001f3a7|\U0001f4fb|\U0001f3b7|\U0001f3b8|\U0001f3b9|\U0001f3ba|\U0001f3bb|\U0001fa95|\U0001f941|\U0001f4f1|\U0001f4f2|\u260e|\U0001f4de|\U0001f4df|\U0001f4e0|\U0001f50b|\U0001f50c|\U0001f4bb|\U0001f5a5|\U0001f5a8|\u2328|\U0001f5b1|\U0001f5b2|\U0001f4bd|\U0001f4be|\U0001f4bf|\U0001f4c0|\U0001f9ee|\U0001f3a5|\U0001f39e|\U0001f4fd|\U0001f3ac|\U0001f4fa|\U0001f4f7|\U0001f4f8|\U0001f4f9|\U0001f4fc|\U0001f50d|\U0001f50e|\U0001f56f|\U0001f4a1|\U0001f526|\U0001f3ee|\U0001fa94|\U0001f4d4|\U0001f4d5|\U0001f4d6|\U0001f4d7|\U0001f4d8|\U0001f4d9|\U0001f4da|\U0001f4d3|\U0001f4d2|\U0001f4c3|\U0001f4dc|\U0001f4c4|\U0001f4f0|\U0001f5de|\U0001f4d1|\U0001f516|\U0001f3f7|\U0001f4b0|\U0001f4b4|\U0001f4b5|\U0001f4b6|\U0001f4b7|\U0001f4b8|\U0001f4b3|\U0001f9fe|\U0001f4b9|\U0001f4b1|\U0001f4b2|\u2709|\U0001f4e7|\U0001f4e8|\U0001f4e9|\U0001f4e4|\U0001f4e5|\U0001f4e6|\U0001f4eb|\U0001f4ea|\U0001f4ec|\U0001f4ed|\U0001f4ee|\U0001f5f3|\u270f|\u2712|\U0001f58b|\U0001f58a|\U0001f58c|\U0001f58d|\U0001f4dd|\U0001f4bc|\U0001f4c1|\U0001f4c2|\U0001f5c2|\U0001f4c5|\U0001f4c6|\U0001f5d2|\U0001f5d3|\U0001f4c7|\U0001f4c8|\U0001f4c9|\U0001f4ca|\U0001f4cb|\U0001f4cc|\U0001f4cd|\U0001f4ce|\U0001f587|\U0001f4cf|\U0001f4d0|\u2702|\U0001f5c3|\U0001f5c4|\U0001f5d1|\U0001f512|\U0001f513|\U0001f50f|\U0001f510|\U0001f511|\U0001f5dd|\U0001f528|\U
0001fa93|\u26cf|\u2692|\U0001f6e0|\U0001f5e1|\u2694|\U0001f52b|\U0001f3f9|\U0001f6e1|\U0001f527|\U0001f529|\u2699|\U0001f5dc|\u2696|\U0001f9af|\U0001f517|\u26d3|\U0001f9f0|\U0001f9f2|\u2697|\U0001f9ea|\U0001f9eb|\U0001f9ec|\U0001f52c|\U0001f52d|\U0001f4e1|\U0001f489|\U0001fa78|\U0001f48a|\U0001fa79|\U0001fa7a|\U0001f6aa|\U0001f6cf|\U0001f6cb|\U0001fa91|\U0001f6bd|\U0001f6bf|\U0001f6c1|\U0001fa92|\U0001f9f4|\U0001f9f7|\U0001f9f9|\U0001f9fa|\U0001f9fb|\U0001f9fc|\U0001f9fd|\U0001f9ef|\U0001f6d2|\U0001f6ac|\u26b0|\u26b1|\U0001f5ff|\U0001f3e7|\U0001f6ae|\U0001f6b0|\u267f|\U0001f6b9|\U0001f6ba|\U0001f6bb|\U0001f6bc|\U0001f6be|\U0001f6c2|\U0001f6c3|\U0001f6c4|\U0001f6c5|\u26a0|\U0001f6b8|\u26d4|\U0001f6ab|\U0001f6b3|\U0001f6ad|\U0001f6af|\U0001f6b1|\U0001f6b7|\U0001f4f5|\U0001f51e|\u2622|\u2623|\u2b06|\u2197|\u27a1|\u2198|\u2b07|\u2199|\u2b05|\u2196|\u2195|\u2194|\u21a9|\u21aa|\u2934|\u2935|\U0001f503|\U0001f504|\U0001f519|\U0001f51a|\U0001f51b|\U0001f51c|\U0001f51d|\U0001f6d0|\u269b|\U0001f549|\u2721|\u2638|\u262f|\u271d|\u2626|\u262a|\u262e|\U0001f54e|\U0001f52f|\u2648|\u2649|\u264a|\u264b|\u264c|\u264d|\u264e|\u264f|\u2650|\u2651|\u2652|\u2653|\u26ce|\U0001f500|\U0001f501|\U0001f502|\u25b6|\u23e9|\u23ed|\u23ef|\u25c0|\u23ea|\u23ee|\U0001f53c|\u23eb|\U0001f53d|\u23ec|\u23f8|\u23f9|\u23fa|\u23cf|\U0001f3a6|\U0001f505|\U0001f506|\U0001f4f6|\U0001f4f3|\U0001f4f4|\u2640|\u2642|\u2695|\u267e|\u267b|\u269c|\U0001f531|\U0001f4db|\U0001f530|\u2b55|\u2705|\u2611|\u2714|\u2716|\u274c|\u274e|\u2795|\u2796|\u2797|\u27b0|\u27bf|\u303d|\u2733|\u2734|\u2747|\u203c|\u2049|\u2753|\u2754|\u2755|\u2757|\u3030|\xa9|\xae|\u2122|\U0001f51f|\U0001f520|\U0001f521|\U0001f522|\U0001f523|\U0001f524|\U0001f170|\U0001f18e|\U0001f171|\U0001f191|\U0001f192|\U0001f193|\u2139|\U0001f194|\u24c2|\U0001f195|\U0001f196|\U0001f17e|\U0001f197|\U0001f17f|\U0001f198|\U0001f199|\U0001f19a|\U0001f201|\U0001f202|\U0001f237|\U0001f236|\U0001f22f|\U0001f250|\U0001f239|\U0001f21a|\U0001f232|\U0001f251|\U0001f238|\U0001f234|\U0001f233|\u3297|\u3299|\U0001f23a|\U0001f235|\U0001f534|\U0001f7e0|\U0001f7e1|\U0001f7e2|\U0001f535|\U0001f7e3|\U0001f7e4|\u26ab|\u26aa|\U0001f7e5|\U0001f7e7|\U0001f7e8|\U0001f7e9|\U0001f7e6|\U0001f7ea|\U0001f7eb|\u2b1b|\u2b1c|\u25fc|\u25fb|\u25fe|\u25fd|\u25aa|\u25ab|\U0001f536|\U0001f537|\U0001f538|\U0001f539|\U0001f53a|\U0001f53b|\U0001f4a0|\U0001f518|\U0001f533|\U0001f532|\U0001f3c1|\U0001f6a9|\U0001f38c|\U0001f3f4|\U0001f3f3')) -> str:
"""Return the string obtained by replacing all emojis in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all emojis in text with 'repl'
Reference:
akkez/emoji.py: Python emoji regexp / python emoji detection
https://gist.github.com/akkez/99ceeae2f13c9d8d9be7df0279e2c438
"""
text = regex.sub(repl, text)
return text
def _email_normalize(self, text: str, repl: str, regex=re.compile(r'[a-zA-Z0-9.!#$%&\'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9.]+')) -> str:
"""Return the string obtained by replacing all email addresses in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all email addresses in text with 'repl'
"""
text = regex.sub(repl, text)
return text
def _tel_normalize(self, text: str, repl: str, regex=re.compile(r'[()+\d.\-]*[ ]?\d{2,4}[-. ]+\d{3,4}[-. ]+\d{3,4}')) -> str:
"""Return the string obtained by replacing all phone numbers in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all phone numbers in text with 'repl'
"""
text = regex.sub(repl, text)
return text |
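All three normalizers above follow one pattern: a regex compiled once and bound as a default parameter, then applied with `regex.sub(repl, text)`. A minimal, self-contained sketch of the same idea; the `TextNormalizer` host class below is hypothetical, added only so the `self`-taking method has somewhere to live:

import re

class TextNormalizer:
    # Hypothetical wrapper class; the method mirrors _email_normalize above.
    def _email_normalize(self, text: str, repl: str,
                         regex=re.compile(r"[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+"
                                          r"@[a-zA-Z0-9-]+\.[a-zA-Z0-9.]+")) -> str:
        # The pattern is compiled once, at function definition time,
        # then reused on every call.
        return regex.sub(repl, text)

norm = TextNormalizer()
print(norm._email_normalize("write to alice@example.com today", "<EMAIL>"))
# -> write to <EMAIL> today

Binding the compiled pattern as a default argument avoids recompiling on each call and keeps the pattern visible in the method signature.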
upsert | Expects a file *object*, not a file path. This is important because this has to work for both
the management command and the web uploader; the web uploader will pass in an in-memory file
with no path!
Header row is:
Title, Group, Task List, Created By, Created Date, Due Date, Completed, Assigned To, Note, Priority | import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
"""Core upsert functionality for CSV import, for re-use by `import_csv` management command, web UI and tests.
Supplies a detailed log of what was and was not imported at the end. See README for usage notes.
"""
def __init__(self):
self.errors = []
self.upserts = []
self.summaries = []
self.line_count = 0
self.upsert_count = 0
# MASKED: upsert function (lines 26-102)
def validate_row(self, row):
"""Perform data integrity checks and set default values. Returns a valid object for insertion, or False.
Errors are stored for later display. Intentionally not broken up into separate validator functions because
there are interdependencies, such as checking for an existing `creator` in one place and then using
that creator for the group membership check in others."""
row_errors = []
# #######################
# Task creator must exist
if not row.get("Created By"):
msg = f"Missing required task creator."
row_errors.append(msg)
creator = get_user_model().objects.filter(username=row.get("Created By")).first()
if not creator:
msg = f"Invalid task creator {row.get('Created By')}"
row_errors.append(msg)
# #######################
# If specified, Assignee must exist
assignee = None # Perfectly valid
if row.get("Assigned To"):
assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
if assigned.exists():
assignee = assigned.first()
else:
msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
row_errors.append(msg)
# #######################
# Group must exist
try:
target_group = Group.objects.get(name=row.get("Group"))
except Group.DoesNotExist:
msg = f"Could not find group {row.get('Group')}."
row_errors.append(msg)
target_group = None
# #######################
# Task creator must be in the target group
if creator and target_group not in creator.groups.all():
msg = f"{creator} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Assignee must be in the target group
if assignee and target_group not in assignee.groups.all():
msg = f"{assignee} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Task list must exist in the target group
try:
tasklist = TaskList.objects.get(name=row.get("Task List"), group=target_group)
row["Task List"] = tasklist
except TaskList.DoesNotExist:
msg = f"Task list {row.get('Task List')} in group {target_group} does not exist"
row_errors.append(msg)
# #######################
# Validate Dates
datefields = ["Due Date", "Created Date"]
for datefield in datefields:
datestring = row.get(datefield)
if datestring:
valid_date = self.validate_date(datestring)
if valid_date:
row[datefield] = valid_date
else:
msg = f"Could not convert {datefield} {datestring} to valid date instance"
row_errors.append(msg)
# #######################
# Group membership checks have passed
row["Created By"] = creator
row["Group"] = target_group
if assignee:
row["Assigned To"] = assignee
# Set Completed
row["Completed"] = row["Completed"] == "Yes"
# #######################
if row_errors:
self.errors.append({self.line_count: row_errors})
return False
# No errors:
return row
def validate_date(self, datestring):
"""Inbound date string from CSV translates to a valid python date."""
try:
date_obj = datetime.datetime.strptime(datestring, "%Y-%m-%d")
return date_obj
except ValueError:
return False | def upsert(self, fileobj, as_string_obj=False):
"""Expects a file *object*, not a file path. This is important because this has to work for both
the management command and the web uploader; the web uploader will pass in an in-memory file
with no path!
Header row is:
Title, Group, Task List, Created By, Created Date, Due Date, Completed, Assigned To, Note, Priority
"""
if as_string_obj:
# fileobj comes from mgmt command
csv_reader = csv.DictReader(fileobj)
else:
# fileobj comes from browser upload (in-memory)
csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))
# DI check: Do we have expected header row?
header = csv_reader.fieldnames
expected = [
"Title",
"Group",
"Task List",
"Created By",
"Created Date",
"Due Date",
"Completed",
"Assigned To",
"Note",
"Priority",
]
if header != expected:
self.errors.append(
f"Inbound data does not have expected columns.\nShould be: {expected}"
)
return
for row in csv_reader:
self.line_count += 1
newrow = self.validate_row(row)
if newrow:
# newrow at this point is fully validated, and all FK relations exist,
# e.g. `newrow.get("Assigned To")` is a Django User instance.
assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
created_at = (
newrow.get("Created Date")
if newrow.get("Created Date")
else datetime.datetime.today()
)
due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
priority = newrow.get("Priority") if newrow.get("Priority") else None
obj, created = Task.objects.update_or_create(
created_by=newrow.get("Created By"),
task_list=newrow.get("Task List"),
title=newrow.get("Title"),
defaults={
"assigned_to": assignee,
"completed": newrow.get("Completed"),
"created_at": created_at,
"due_date": due_date,
"note": newrow.get("Note"),
"priority": priority,
},
)
self.upsert_count += 1
msg = (
f'Upserted task {obj.id}: "{obj.title}"'
f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
)
self.upserts.append(msg)
self.summaries.append(f"Processed {self.line_count} CSV rows")
self.summaries.append(f"Upserted {self.upsert_count} rows")
self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")
return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors} | 26 | 102 | import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
"""Core upsert functionality for CSV import, for re-use by `import_csv` management command, web UI and tests.
Supplies a detailed log of what was and was not imported at the end. See README for usage notes.
"""
def __init__(self):
self.errors = []
self.upserts = []
self.summaries = []
self.line_count = 0
self.upsert_count = 0
def upsert(self, fileobj, as_string_obj=False):
"""Expects a file *object*, not a file path. This is important because this has to work for both
the management command and the web uploader; the web uploader will pass in an in-memory file
with no path!
Header row is:
Title, Group, Task List, Created By, Created Date, Due Date, Completed, Assigned To, Note, Priority
"""
if as_string_obj:
# fileobj comes from mgmt command
csv_reader = csv.DictReader(fileobj)
else:
# fileobj comes from browser upload (in-memory)
csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))
# DI check: Do we have expected header row?
header = csv_reader.fieldnames
expected = [
"Title",
"Group",
"Task List",
"Created By",
"Created Date",
"Due Date",
"Completed",
"Assigned To",
"Note",
"Priority",
]
if header != expected:
self.errors.append(
f"Inbound data does not have expected columns.\nShould be: {expected}"
)
return
for row in csv_reader:
self.line_count += 1
newrow = self.validate_row(row)
if newrow:
# newrow at this point is fully validated, and all FK relations exist,
# e.g. `newrow.get("Assigned To")` is a Django User instance.
assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
created_at = (
newrow.get("Created Date")
if newrow.get("Created Date")
else datetime.datetime.today()
)
due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
priority = newrow.get("Priority") if newrow.get("Priority") else None
obj, created = Task.objects.update_or_create(
created_by=newrow.get("Created By"),
task_list=newrow.get("Task List"),
title=newrow.get("Title"),
defaults={
"assigned_to": assignee,
"completed": newrow.get("Completed"),
"created_at": created_at,
"due_date": due_date,
"note": newrow.get("Note"),
"priority": priority,
},
)
self.upsert_count += 1
msg = (
f'Upserted task {obj.id}: "{obj.title}"'
f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
)
self.upserts.append(msg)
self.summaries.append(f"Processed {self.line_count} CSV rows")
self.summaries.append(f"Upserted {self.upsert_count} rows")
self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")
return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors}
def validate_row(self, row):
"""Perform data integrity checks and set default values. Returns a valid object for insertion, or False.
Errors are stored for later display. Intentionally not broken up into separate validator functions because
there are interdependencies, such as checking for an existing `creator` in one place and then using
that creator for the group membership check in others."""
row_errors = []
# #######################
# Task creator must exist
if not row.get("Created By"):
msg = f"Missing required task creator."
row_errors.append(msg)
creator = get_user_model().objects.filter(username=row.get("Created By")).first()
if not creator:
msg = f"Invalid task creator {row.get('Created By')}"
row_errors.append(msg)
# #######################
# If specified, Assignee must exist
assignee = None # Perfectly valid
if row.get("Assigned To"):
assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
if assigned.exists():
assignee = assigned.first()
else:
msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
row_errors.append(msg)
# #######################
# Group must exist
try:
target_group = Group.objects.get(name=row.get("Group"))
except Group.DoesNotExist:
msg = f"Could not find group {row.get('Group')}."
row_errors.append(msg)
target_group = None
# #######################
# Task creator must be in the target group
if creator and target_group not in creator.groups.all():
msg = f"{creator} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Assignee must be in the target group
if assignee and target_group not in assignee.groups.all():
msg = f"{assignee} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Task list must exist in the target group
try:
tasklist = TaskList.objects.get(name=row.get("Task List"), group=target_group)
row["Task List"] = tasklist
except TaskList.DoesNotExist:
msg = f"Task list {row.get('Task List')} in group {target_group} does not exist"
row_errors.append(msg)
# #######################
# Validate Dates
datefields = ["Due Date", "Created Date"]
for datefield in datefields:
datestring = row.get(datefield)
if datestring:
valid_date = self.validate_date(datestring)
if valid_date:
row[datefield] = valid_date
else:
msg = f"Could not convert {datefield} {datestring} to valid date instance"
row_errors.append(msg)
# #######################
# Group membership checks have passed
row["Created By"] = creator
row["Group"] = target_group
if assignee:
row["Assigned To"] = assignee
# Set Completed
row["Completed"] = row["Completed"] == "Yes"
# #######################
if row_errors:
self.errors.append({self.line_count: row_errors})
return False
# No errors:
return row
def validate_date(self, datestring):
"""Inbound date string from CSV translates to a valid python date."""
try:
date_obj = datetime.datetime.strptime(datestring, "%Y-%m-%d")
return date_obj
except ValueError:
return False
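A quick usage sketch for the importer as a whole, assuming a configured Django project with the `todo` app installed and matching users, groups, and task lists already present; the module path in the import is an assumption:

from io import StringIO
from todo.operations.csv_importer import CSVImporter  # assumed module path

csv_text = (
    "Title,Group,Task List,Created By,Created Date,Due Date,"
    "Completed,Assigned To,Note,Priority\n"
    "Write docs,Staff,Backlog,alice,2020-01-02,2020-02-01,No,bob,First pass,2\n"
)
importer = CSVImporter()
# as_string_obj=True is the text-stream path used by the management command.
results = importer.upsert(StringIO(csv_text), as_string_obj=True)
print(results["summaries"])  # e.g. ["Processed 1 CSV rows", "Upserted 1 rows", ...]
print(results["errors"])     # per-line dicts of validation messages, if any

Note that the header row must match the `expected` list exactly and in order; otherwise `upsert` records a single error and returns early.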
|
install | Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False. | import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
# MASKED: install function (lines 44-132)
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
margin (int, optional): Subtract a margin from the width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
) # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20)) | def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on Jupyter rich display, don't use Rich if the value supplies one of the special _repr_* representations
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook | 44 | 132 | import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on Jupyter rich display, don't use Rich if the value supplies one of the special _repr_* representations
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
margin (int, optional): Subtract a margin from the width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
) # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
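The flow across this module is: `pretty_repr` calls `traverse` to build a `Node` tree, then `Node.render` expands any line whose tokens exceed `max_width`. A small check of that width-driven behavior through the public API; the outputs described in comments are indicative:

from rich.pretty import pretty_repr

data = {"fruits": ["apple", "banana", "cherry"], "count": 3}
print(pretty_repr(data, max_width=80))
# fits on one line: {'fruits': ['apple', 'banana', 'cherry'], 'count': 3}
print(pretty_repr(data, max_width=20))
# overflows, so containers expand onto indented lines
print(pretty_repr(data, max_width=20, max_length=2))
# the list is abbreviated after 2 items with a trailing "... +1" marker

`expand_all=True` skips the width check entirely, and `install()` wires the same rendering into `sys.displayhook` (or IPython's plain-text formatter) so REPL results are printed this way automatically.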
|
pretty_repr | Prettify repr string by expanding onto new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object. | import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on Jupyter rich display, don't use Rich if the value supplies one of the special _repr_* representations
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from the width to force containers to expand earlier. Defaults to 0.
        insert_line (bool, optional): Insert a blank line before the output if it spans multiple lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
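# Usage sketch: ``Pretty`` is itself a renderable, so it composes with any
# rich output path (illustrative only; output shape depends on terminal width):
#
#     from rich.console import Console
#
#     console = Console()
#     console.print(Pretty(locals(), max_length=4, indent_guides=True))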
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
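# Each ``_BRACES`` entry maps a container type to a callable returning the
# (open, close, empty) strings used when rendering it, e.g.:
#
#     _BRACES[frozenset](frozenset({1}))   # -> ("frozenset({", "})", "frozenset()")
#
# Hypothetically, supporting another container here would mean adding an entry
# (and listing it in ``_MAPPING_CONTAINERS`` if it is a mapping).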
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
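# For example (illustrative only):
#
#     is_expandable([1, 2, 3])   # True  -- known container type
#     is_expandable(object())    # False -- atomic, rendered via repr()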
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
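    # Illustrative check (using ``traverse``, defined later in this module):
    # the tokens of ``[1, 2, 3]`` occupy 9 cells, so the node fits a 20-cell
    # budget from column 0 but not once 15 cells are already consumed:
    #
    #     node = traverse([1, 2, 3])
    #     node.check_length(0, 20)    # True
    #     node.check_length(15, 20)   # False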
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
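# Nodes can also be built directly; ``render`` then drives the width-aware
# expansion (a minimal sketch, field names as defined above):
#
#     node = Node(open_brace="[", close_brace="]",
#                 children=[Node(value_repr="1", last=True)], last=True)
#     node.render(max_width=80)   # -> "[1]"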
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
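    # Truncation sketch: with ``max_string=5`` a ten-character string renders
    # as its first five characters plus a count of what was dropped, e.g.
    # ``to_repr("abcdefghij")`` -> "'abcde'+5".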
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
        def iter_rich_args(rich_args: Iterable[Any]) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
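        # A class can opt in via ``__rich_repr__``: each yielded item may be a
        # bare value, a (key, value) pair, or a (key, value, default) triple
        # that is skipped when the value equals its default. Hypothetical
        # example:
        #
        #     class Bird:
        #         def __init__(self, name, eats=()):
        #             self.name = name
        #             self.eats = eats
        #         def __rich_repr__(self):
        #             yield self.name
        #             yield "eats", self.eats, ()
        #
        #     # pretty_repr(Bird("penguin")) -> "Bird('penguin')"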
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
        )  # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
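# Quick sanity sketch: ``str()`` on the returned root joins its tokens back
# into a single-line repr:
#
#     str(traverse({"a": [1, 2]}))   # -> "{'a': [1, 2]}"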
# MASKED: pretty_repr function (lines 587-619)
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
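# Usage sketch for the module's two convenience entry points (illustrative):
#
#     from rich.pretty import pprint, pretty_repr
#
#     pprint(list(range(100)), max_length=10)              # prints via a Console
#     text = pretty_repr(list(range(100)), max_width=40)   # returns a str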
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
    print(Pretty(data, indent_guides=True, max_string=20))
|
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
    return repr_str
| 587 | 619 |
check_length | Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
    bool: True if the node can be rendered within max length, otherwise False. |
print(Pretty(data, indent_guides=True, max_string=20)) | def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
        return True
| 311 | 326 |
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations dont use rich
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
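# Hedged example (a sketch, not part of the original module): how Pretty is
# typically handed to a Console for rendering. The helper name is illustrative.
def _demo_pretty_usage():  # hypothetical helper
    from rich.console import Console  # local import keeps the sketch self-contained
    console = Console()
    # A nested container rendered with indent guides; strings over 30 cells truncate.
    console.print(Pretty({"squares": [n * n for n in range(20)]}, indent_guides=True, max_string=30))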
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
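# For example, is_expandable([1, 2]) and is_expandable({"a": 1}) return True,
# while is_expandable(42) returns False: only containers, dataclass instances,
# and objects defining __rich_repr__ can be split across multiple lines.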
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
                        child_node.last = last
                        child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            ) # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
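# Quick sketch of traverse() in use (behaviour inferred from the code above):
#   node = traverse({"key": list(range(5))}, max_length=3)
#   print(node.render(max_width=20))
# The list is cut at three children, and a trailing "... +2" node records the
# two abbreviated items.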
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
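# Example (sketch): pretty_repr is the pure-string entry point.
#   >>> pretty_repr(list(range(4)), max_width=80)
#   '[0, 1, 2, 3]'
# Under a very small max_width the same list is expanded one element per line.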
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
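# Example (sketch): pprint writes to the global console unless one is passed.
#   from rich.pretty import pprint
#   pprint(sys.path, max_length=5, max_string=40)
# Containers past five items are abbreviated and long strings are truncated.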
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
|
render | Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object. | import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
        # in Jupyter, if the object defines one of IPython's special _repr_* methods, don't use rich
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
# MASKED: render function (lines 332-355)
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
                        child_node.last = last
                        child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            ) # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20)) | def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str | 332 | 355 | import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
        # in Jupyter, if the object defines one of IPython's special _repr_* methods, don't use rich
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
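    # For example (hedged sketch), rendering under a narrow width:
    #   traverse([1, 2, 3]).render(max_width=5)
    # expands the list so each element sits on its own indented line, because
    # the one-line form "[1, 2, 3]" exceeds five cells.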
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
                        child_node.last = last
                        child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            ) # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
|
__init__ | Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships) | import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
    Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
# MASKED: __init__ function (lines 35-94)
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
        for r in self.relationships:
            if r not in other.relationships:
                return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. None if entity doesn't
exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
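    # Example (sketch), assuming an EntitySet `es` containing a "transactions"
    # entity:
    #   trans = es["transactions"]   # returns the Entity instance
    #   es["nope"]                   # raises KeyError naming this entityset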
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
'''Returns the metadata for this EntitySet. The metadata will be recomputed if it does not exist.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
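    # Usage sketch for the writers above (paths and options are illustrative;
    # the read side is featuretools' top-level read_entityset helper):
    #   es.to_csv("/tmp/es_csv", compression="gzip")
    #   es.to_parquet("s3://bucket/es", profile_name="default")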
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
        for r in relationships:
            self.add_relationship(r)
        return self
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
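    # Example (sketch), assuming "cards" and "transactions" entities exist and
    # using Relationship(parent_variable, child_variable):
    #   r = Relationship(es["cards"]["id"], es["transactions"]["card_id"])
    #   es.add_relationship(r)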
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
            start_entity_id (str) : Id of entity to start the search from.
            goal_entity_id (str) : Id of entity to find a forward path to.
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
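    # Path-query sketch, continuing the assumed "orders" -> "customers"
    # example above; forward paths run from child to parent:
    #
    #   list(es.find_forward_paths("orders", "customers"))
    #   # -> e.g. one path: [<orders.customer_id -> customers.customer_id>]
    #   list(es.find_backward_paths("customers", "orders"))
    #   # -> the same paths with the relationships reversed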
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): If True, recursively find forward entities.
        Yields a tuple of (parent_entity_id, path from entity_id to parent).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): If True, recursively find backward entities.
        Yields a tuple of (child_entity_id, path from entity_id to child).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
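    # Illustrative calls to the traversal helpers (entity names assumed):
    #
    #   es.get_forward_relationships("orders")      # "orders" is the child
    #   es.get_backward_relationships("customers")  # "customers" is the parent
    #   es.has_unique_forward_path("orders", "customers")
    #   # -> True when exactly one forward path exists; raises if none exists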
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
                Keys are variable ids and values are variable types or type_strings.
                Used to initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
                time data. Type must be :class:`variables.DateTime` or be
                able to be cast to datetime (e.g. str, float, or numeric).
secondary_time_index (dict[str -> Variable]): Name of variable
                containing time data to use as a second time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
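    # Sketch of overriding inferred types with explicit variable_types
    # (column names are assumed; `vtypes` mirrors featuretools.variable_types):
    #
    #   import featuretools as ft
    #   from featuretools import variable_types as vtypes
    #   es = ft.EntitySet("example")
    #   es.entity_from_dataframe("transactions", transactions_df, index="id",
    #                            time_index="transaction_time",
    #                            variable_types={"fraud": vtypes.Boolean})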
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
from key. Values of dictionary
are the variables to associate with the secondary time index. Only one
secondary time index is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
            if base_time_index not in additional_variables:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
        selected_variables = [index] + additional_variables + copy_variables
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
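    # Usage sketch: split unique sessions out of a transactions entity
    # (entity and column names are assumed for illustration):
    #
    #   es.normalize_entity(base_entity_id="transactions",
    #                       new_entity_id="sessions",
    #                       index="session_id",
    #                       make_time_index=True)
    #   # "sessions" now has one row per session_id, and a new
    #   # sessions -> transactions relationship has been added.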
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
                # Keep every column except the index and time index when
                # deduplicating ("or" here would make the filter a no-op).
                columns = [col for col in combined_df.columns if
                           col != entity.index and col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
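    # Usage sketch: recombine two structurally identical entitysets,
    # e.g. after partitioning rows for parallel work (assumed setup):
    #
    #   es_full = es_part1.concat(es_part2)       # returns a new entityset
    #   es_part1.concat(es_part2, inplace=True)   # mutates es_part1 instead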
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
        calculating features using training windows.
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
# Now there is a possibility that a child entity
# was not explicitly provided in updated_entities,
# and never made it onto the queue. If updated_entities
                    # is None then all entities are loaded onto the queue,
                    # so this logic is not needed.
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
            # update last time index from all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
                    lti_df = pd.Series(dtype='datetime64[ns]')
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
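    # The per-child aggregation above reduces to this pandas pattern
    # (a standalone sketch with made-up data, not the method itself):
    #
    #   import pandas as pd
    #   child = pd.DataFrame({"parent_id": [1, 1, 2],
    #                         "time": pd.to_datetime(["2020-01-01",
    #                                                 "2020-01-03",
    #                                                 "2020-01-02"])})
    #   last_seen = child.groupby("parent_id")["time"].max()
    #   # parent 1 -> 2020-01-03, parent 2 -> 2020-01-02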
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
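    # Usage sketch (the values shown are hypothetical):
    #
    #   es.add_interesting_values(max_values=3)
    #   es["transactions"]["fraud"].interesting_values  # -> e.g. [True, False]
    #   # These feed "where" clauses, e.g. ft.dfs(..., where_primitives=["count"])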
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
            variables_string = '\\l'.join([var.id + ' : ' + var.type_string
                                           for var in entity.variables])
            nrows = entity.shape[0]
            label = '{%s (%d row%s)|%s\\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string)
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
            # Display the key only once if it is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph | def __init__(self, id=None, entities=None, relationships=None):
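    # Usage sketch (requires the graphviz Python package plus a backend):
    #
    #   graph = es.plot()          # returns a graphviz.Digraph for notebooks
    #   es.plot(to_file="es.png")  # also renders the diagram to disk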
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description() | 35 | 94 | import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
    Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
        for r in self.relationships:
if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. None if entity doesn't
exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
'''Returns the metadata for this EntitySet. The metadata will be recomputed if it does not exist.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
        '''Write entityset to disk in the pickle format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
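    # Round-trip sketch (the path is a placeholder; ft.read_entityset is
    # assumed to be the matching deserializer):
    #
    #   es.to_parquet("/tmp/my_es")
    #   es2 = ft.read_entityset("/tmp/my_es")
    #   assert es == es2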
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
return [self.add_relationship(r) for r in relationships][-1]
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
goal_entity_id (str) : if of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
entity_id (str): Id entity of entity to search from.
deep (bool): if True, recursively find forward entities.
Yields a tuple of (descendent_id, path from entity_id to descendant).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
entity_id (str): Id entity of entity to search from.
deep (bool): if True, recursively find backward entities.
Yields a tuple of (descendent_id, path from entity_id to descendant).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
Keys are of variable ids and values are variable types or type_strings. Used to to
initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
time data. Type must be in :class:`variables.DateTime` or be
able to be cast to datetime (e.g. str, float, or numeric.)
secondary_time_index (dict[str -> Variable]): Name of variable
containing time data to use a second time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
from key. Values of dictionary
are the variables to associate with the secondary time index. Only one
secondary time index is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in [v for v in additional_variables]:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] +\
[v for v in additional_variables] +\
[v for v in copy_variables]
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
columns = [col for col in combined_df.columns if
col != entity.index or col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
# Now there is a possibility that a child entity
# was not explicitly provided in updated_entities,
# and never made it onto the queue. If updated_entities
# is None then we just load all entities onto the queue
# so we didn't need this logic
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
# updated last time from all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
lti_df = pd.Series()
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
variables_string = '\l'.join([var.id + ' : ' + var.type_string # noqa: W605
for var in entity.variables])
nrows = entity.shape[0]
label = '{%s (%d row%s)|%s\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string) # noqa: W605
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
# Display the key only once if it is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph
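# --- Usage sketch (illustrative, not part of the library source): build a
# minimal entityset and render its diagram. Requires graphviz plus a system
# rendering backend; all names below ('demo', items_df, ...) are made up.
import pandas as pd
import featuretools as ft
items_df = pd.DataFrame({"id": [1, 2, 3], "price": [9.99, 5.00, 2.50]})
es_demo = ft.EntitySet("demo")
es_demo.entity_from_dataframe(entity_id="items", dataframe=items_df, index="id")
es_demo.plot(to_file="entityset_demo.pdf")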
|
add_relationship | Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added. | import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __init__(self, id=None, entities=None, relationships=None):
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
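# --- Worked sketch (illustrative): the optional tuple slots above are
# time_index, variable_types and make_index, in that order. A full 5-tuple
# entry that also asks for a generated index could look like this (the
# dataframe and ids are made up):
# log_df = pd.DataFrame({"ts": pd.date_range("2020-01-01", periods=3, freq="D"),
#                        "level": ["info", "warn", "info"]})
# es_logs = EntitySet("logs-example", entities={
#     "logs": (log_df, "log_id", "ts", None, True),  # make_index creates 'log_id'
# })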
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
for r in self.relationships:
if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. A KeyError is raised if the
entity does not exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
'''Returns the metadata for this EntitySet. The metadata is computed on first access and cached until reset_data_description() is called.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
Path can be a local path or an S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
Path can be a local path or an S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
Path can be a local path or an S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
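# --- Round-trip sketch (illustrative): the writers above pair with
# featuretools.read_entityset for loading. Paths and names are made up.
# es.to_csv('/tmp/my_es')                # directory of CSVs + description
# es_again = ft.read_entityset('/tmp/my_es')
# assert es_again == es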
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
# Loop instead of indexing [-1], which raises IndexError on an empty list
for r in relationships:
    self.add_relationship(r)
return self
# MASKED: add_relationship function (lines 230-277)
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
goal_entity_id (str) : id of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
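# --- Usage sketch (illustrative): enumerate relationship paths between two
# entities; 'transactions' and 'customers' are assumed entity ids.
# for path in es.find_forward_paths('transactions', 'customers'):
#     print([r.parent_entity.id for r in path])
# for path in es.find_backward_paths('customers', 'transactions'):
#     print([r.child_entity.id for r in path])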
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
entity_id (str): Id of entity to search from.
deep (bool): if True, recursively find forward entities.
Yields a tuple of (parent_eid, path from entity_id to parent).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
entity_id (str): Id of entity to search from.
deep (bool): if True, recursively find backward entities.
Yields a tuple of (child_eid, path from entity_id to child).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
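# --- Usage sketch (illustrative): walk every descendant of an entity, along
# with the RelationshipPath taken to reach it ('customers' is an assumed id).
# for child_id, rel_path in es.get_backward_entities('customers', deep=True):
#     print(child_id)   # rel_path describes how child_id was reached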
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
Get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
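# Reasoning note: the generator above is asked for at most two paths. In a
# diamond-shaped schema, e.g.
#     grandparent <- parent_a <- child
#     grandparent <- parent_b <- child
# there are two distinct forward paths from child to grandparent, so
# has_unique_forward_path('child', 'grandparent') returns False.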
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
Keys are variable ids and values are variable types or type_strings. Used to
initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
time data. Type must be in :class:`variables.DateTime` or be
able to be cast to datetime (e.g. str, float, or numeric.)
secondary_time_index (dict[str -> Variable]): Name of variable
containing time data to use as a secondary time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
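# --- Variation sketch (illustrative): pass explicit variable_types when
# inference from pandas dtypes is not what you want; reuses transactions_df
# from the docstring example above. vtypes is the module imported at the top
# of this file.
# es.entity_from_dataframe(entity_id="transactions",
#                          dataframe=transactions_df,
#                          index="id",
#                          time_index="transaction_time",
#                          variable_types={"fraud": vtypes.Boolean,
#                                          "session_id": vtypes.Id})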
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
from key. Values of dictionary
are the variables to associate with the secondary time index. Only one
secondary time index is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in additional_variables:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] + additional_variables + copy_variables
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
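# --- Usage sketch (illustrative): split a "sessions" entity out of
# "transactions" on its session_id column; a one-to-many relationship
# sessions -> transactions is added automatically.
# es.normalize_entity(base_entity_id="transactions",
#                     new_entity_id="sessions",
#                     index="session_id",
#                     make_time_index=True)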
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
columns = [col for col in combined_df.columns if
           col != entity.index and col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
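# --- Usage sketch (illustrative): merge two entitysets with identical
# schemas (e.g. the same data split into halves). Names are made up.
# combined = es_first_half.concat(es_second_half)       # returns a new copy
# es_first_half.concat(es_second_half, inplace=True)    # or modify in place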
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
# A child entity may not have been explicitly provided in
# updated_entities and so never made it onto the queue. (When
# updated_entities is empty we load every entity onto the queue
# up front, so this logic is only needed when specific entities
# were requested.)
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
# updated last time from all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
lti_df = pd.Series(dtype='datetime64[ns]')
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
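# Sentinel trick: temporarily replace NaT with 1800-01-01 so the
# row-wise max can compare the two columns, then restore NaT for
# rows that still hold the sentinel (i.e. both values were missing).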
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
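# --- Usage sketch (illustrative): refresh last-time indexes after updating
# one entity's data; all parents of 'transactions' are recomputed as well.
# es.add_last_time_indexes(updated_entities=['transactions'])
# es['customers'].last_time_index   # per-instance last observation times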
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
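# --- Usage note (illustrative): the stored interesting values feed the
# where_primitives option of ft.dfs, yielding conditional features such as
# COUNT(transactions WHERE fraud = True).
# es.add_interesting_values(max_values=3, verbose=True)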
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
variables_string = '\l'.join([var.id + ' : ' + var.type_string # noqa: W605
for var in entity.variables])
nrows = entity.shape[0]
label = '{%s (%d row%s)|%s\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string) # noqa: W605
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
# Display the key only once if it is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph | def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self | 230 | 277 | import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __init__(self, id=None, entities=None, relationships=None):
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
for r in self.relationships:
if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. A KeyError is raised if the
entity does not exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
'''Returns the metadata for this EntitySet. The metadata is computed on first access and cached until reset_data_description() is called.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
Path can be a local path or an S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
Path can be a local path or an S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
Path can be a local path or an S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
# Loop instead of indexing [-1], which raises IndexError on an empty list
for r in relationships:
    self.add_relationship(r)
return self
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
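# --- Usage sketch (illustrative): relationships are declared parent-first;
# Variable objects are looked up via es[entity_id][variable_id]. Entity and
# variable ids below are assumed names.
# rel = Relationship(es['sessions']['id'], es['transactions']['session_id'])
# es.add_relationship(rel)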
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
goal_entity_id (str) : id of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
entity_id (str): Id of entity to search from.
deep (bool): if True, recursively find forward entities.
Yields a tuple of (parent_eid, path from entity_id to parent).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
entity_id (str): Id of entity to search from.
deep (bool): if True, recursively find backward entities.
Yields a tuple of (child_eid, path from entity_id to child).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
Get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
Keys are variable ids and values are variable types or type_strings. Used to
initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
time data. Type must be in :class:`variables.DateTime` or be
able to be cast to datetime (e.g. str, float, or numeric.)
secondary_time_index (dict[str -> Variable]): Name of variable
containing time data to use as a secondary time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
from key. Values of dictionary
are the variables to associate with the secondary time index. Only one
secondary time index is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in additional_variables:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] + additional_variables + copy_variables
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
                columns = [col for col in combined_df.columns if
                           col != entity.index and col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
                    # A child entity may not have been explicitly provided in
                    # updated_entities and thus never made it onto the queue.
                    # (When updated_entities is empty, every entity starts on
                    # the queue, so this step is only needed here.)
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
                # update the last time index using all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
lti_df = pd.Series()
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
variables_string = '\l'.join([var.id + ' : ' + var.type_string # noqa: W605
for var in entity.variables])
nrows = entity.shape[0]
            label = '{%s (%d row%s)|%s\l}' % (entity.id, nrows, 's' * (nrows != 1), variables_string)  # noqa: W605
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
            # Display the key only once if it is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph
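# A minimal sketch of the plotting flow above, assuming the featuretools 0.x
# API plus the graphviz package and a system backend; the file name
# "schema.png" is illustrative.
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
graph = es.plot(to_file="schema.png")  # output format inferred from the extension
graph  # in a Jupyter notebook, the returned Digraph renders inline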
|
_forward_entity_paths | Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search. | import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
    Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __init__(self, id=None, entities=None, relationships=None):
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
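# A runnable version of the constructor example above, assuming the
# featuretools 0.x API; the dataframes are toy stand-ins.
import pandas as pd
import featuretools as ft
card_df = pd.DataFrame({"id": [1, 2]})
transactions_df = pd.DataFrame({
    "id": [1, 2, 3],
    "card_id": [1, 1, 2],
    "transaction_time": pd.date_range("2020-01-01", periods=3, freq="h")})
entities = {
    "cards": (card_df, "id"),
    "transactions": (transactions_df, "id", "transaction_time")}
relationships = [("cards", "id", "transactions", "card_id")]
es = ft.EntitySet("my-entity-set", entities, relationships)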
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
        for r in self.relationships:
            if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
            :class:`.Entity` : Instance of entity.
        Raises:
            KeyError: If the entity does not exist in the entityset.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
        '''Returns the metadata for this EntitySet. The metadata is computed lazily and cached until reset_data_description() is called.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
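# Sketch of a local round trip through the writers above; assumes
# featuretools 0.x also provides ft.read_entityset for loading, and the
# target directory "/tmp/my_es" is illustrative.
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
es.to_csv("/tmp/my_es")  # writes a directory of CSVs plus a data description
es2 = ft.read_entityset("/tmp/my_es")
assert es2.__eq__(es)  # shallow equality: same entities and relationships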
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
        for r in relationships:
            self.add_relationship(r)
        return self
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
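# Sketch of wiring a parent/child relationship by hand (featuretools 0.x API
# assumed); variable lookup via es[entity_id][variable_id] follows
# __getitem__ above.
import pandas as pd
import featuretools as ft
es = ft.EntitySet("shop")
es.entity_from_dataframe("customers", pd.DataFrame({"id": [1, 2]}), index="id")
es.entity_from_dataframe(
    "orders",
    pd.DataFrame({"id": [10, 11, 12], "customer_id": [1, 1, 2]}),
    index="id")
es.add_relationship(ft.Relationship(es["customers"]["id"],         # parent variable
                                    es["orders"]["customer_id"]))  # child variable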
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
            goal_entity_id (str) : id of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
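# Sketch: enumerating relationship paths in the bundled mock customer data
# (transactions -> sessions -> customers), assuming featuretools 0.x.
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
for path in es.find_forward_paths("transactions", "customers"):
    print([r.parent_entity.id for r in path])  # e.g. ['sessions', 'customers']
for path in es.find_backward_paths("customers", "transactions"):
    print([r.child_entity.id for r in path])   # the same relationships, reversed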
# MASKED: _forward_entity_paths function (lines 315-339)
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): if True, recursively find forward entities.
        Yields a tuple of (ancestor_id, path from entity_id to ancestor).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): if True, recursively find backward entities.
        Yields a tuple of (descendant_id, path from entity_id to descendant).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
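# Sketch: deep traversal yields every reachable ancestor together with the
# RelationshipPath used to reach it; iterating a RelationshipPath is assumed
# to yield (is_forward, relationship) pairs as in featuretools 0.x.
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
for ancestor_id, path in es.get_forward_entities("transactions", deep=True):
    print(ancestor_id, [r.parent_entity.id for _, r in path])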
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
        Get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
                Keys are variable ids and values are variable types or type_strings. Used to
                initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
                time data. Type must be :class:`variables.Datetime` or be
                able to be cast to datetime (e.g. str, float, or numeric).
            secondary_time_index (dict[str -> Variable]): Name of variable
                containing time data to use as a secondary time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
            make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
                from the dictionary key; the dictionary values are the variables to
                associate with that secondary time index. Only one secondary time index
                is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
            if base_time_index not in additional_variables:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
        selected_variables = [index] + additional_variables + copy_variables
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
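# Sketch of the normalization above: split a "sessions" entity out of flat
# "transactions" data (featuretools 0.x API assumed; column names are toys).
import pandas as pd
import featuretools as ft
df = pd.DataFrame({
    "id": [1, 2, 3, 4],
    "session_id": [1, 1, 2, 2],
    "device": ["desktop", "desktop", "mobile", "mobile"],
    "time": pd.date_range("2020-01-01", periods=4, freq="h")})
es = ft.EntitySet("demo")
es.entity_from_dataframe("transactions", df, index="id", time_index="time")
es.normalize_entity(base_entity_id="transactions",
                    new_entity_id="sessions",
                    index="session_id",
                    additional_variables=["device"])
# "sessions" now has one row per session_id, a derived
# "first_transactions_time" time index, and a relationship back to the base.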
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
                columns = [col for col in combined_df.columns if
                           col != entity.index and col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
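# Sketch: concatenating two entitysets that share a schema but hold disjoint
# rows, e.g. two shards of the same data (featuretools 0.x API assumed).
import pandas as pd
import featuretools as ft
df_a = pd.DataFrame({"id": [1, 2], "t": pd.to_datetime(["2020-01-01", "2020-01-02"])})
df_b = pd.DataFrame({"id": [3, 4], "t": pd.to_datetime(["2020-01-03", "2020-01-04"])})
es_a = ft.EntitySet("shard").entity_from_dataframe("events", df_a, index="id", time_index="t")
es_b = ft.EntitySet("shard").entity_from_dataframe("events", df_b, index="id", time_index="t")
combined = es_a.concat(es_b)  # a new entityset holding the rows of both shards
assert combined["events"].df.shape[0] == 4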
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
                    # A child entity may not have been explicitly provided in
                    # updated_entities and thus never made it onto the queue.
                    # (When updated_entities is empty, every entity starts on
                    # the queue, so this step is only needed here.)
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
                # update the last time index using all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
lti_df = pd.Series()
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
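# Sketch: after add_last_time_indexes, each parent row's last_time_index is
# the latest timestamp observed among its own rows and all of its children's
# rows (featuretools 0.x API assumed).
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
es.add_last_time_indexes()
# One timestamp per customer: the time of that customer's final transaction.
print(es["customers"].last_time_index.head())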
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
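# Sketch: interesting values are stored on each categorical variable and
# later drive "where" clauses during feature generation; the attribute name
# Variable.interesting_values is assumed from featuretools 0.x.
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
es.add_interesting_values(max_values=3, verbose=True)
print(es["sessions"]["device"].interesting_values)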
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
variables_string = '\l'.join([var.id + ' : ' + var.type_string # noqa: W605
for var in entity.variables])
nrows = entity.shape[0]
            label = '{%s (%d row%s)|%s\l}' % (entity.id, nrows, 's' * (nrows != 1), variables_string)  # noqa: W605
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
            # Display the key only once if it is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph | def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path | 315 | 339 | import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
    Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __init__(self, id=None, entities=None, relationships=None):
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
        for r in self.relationships:
            if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
            :class:`.Entity` : Instance of entity.
        Raises:
            KeyError: If the entity does not exist in the entityset.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
        '''Returns the metadata for this EntitySet. The metadata is computed lazily and cached until reset_data_description() is called.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
        Path could be a local path or an S3 path.
        If writing to S3, a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
        for r in relationships:
            self.add_relationship(r)
        return self
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
            goal_entity_id (str) : id of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
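# A standalone sketch of the same depth-first enumeration over a plain dict
# graph: copying the seen-set per branch allows multiple distinct paths to a
# node while still cutting off cycles, mirroring _forward_entity_paths above.
def forward_paths(graph, start, seen=None):
    seen = seen or set()
    if start in seen:
        return
    seen = seen | {start}  # copy per branch, like seen_entities.copy() above
    yield start, []
    for parent in graph.get(start, []):
        for node, path in forward_paths(graph, parent, set(seen)):
            yield node, [(start, parent)] + path
# Diamond shape: d -> b -> a and d -> c -> a, so 'a' is yielded twice.
print(list(forward_paths({"d": ["b", "c"], "b": ["a"], "c": ["a"]}, "d")))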
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): if True, recursively find forward entities.
        Yields a tuple of (ancestor_id, path from entity_id to ancestor).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): if True, recursively find backward entities.
        Yields a tuple of (descendant_id, path from entity_id to descendant).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
        Get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
Keys are variable ids and values are variable types or type_strings. Used to
initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
time data. Type must be :class:`variables.Datetime` or be
castable to datetime (e.g. str, float, or numeric).
secondary_time_index (dict[str -> Variable]): Name of variable
containing time data to use as a secondary time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
from the dictionary key; the values are the variables to associate
with the secondary time index. Only one secondary time index is
allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
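Example:
A minimal sketch reusing the ``transactions`` entity from
``entity_from_dataframe`` above; the ``sessions`` entity id is an
illustrative assumption.
.. ipython:: python
es.normalize_entity(base_entity_id="transactions",
new_entity_id="sessions",
index="session_id")
es["sessions"]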
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in additional_variables:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] + additional_variables + copy_variables
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
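Example (a minimal sketch; ``other_es`` stands for an assumed second
entityset with the same entities, relationships and variable ids):
.. ipython:: python
combined_es = es.concat(other_es)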
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
# Exclude the auto-created index (and the time index) from the
# deduplication key; created index values need not match across
# the two entitysets.
columns = [col for col in combined_df.columns if
col != entity.index and col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
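Example (illustrative; recomputes ``last_time_index`` for every
entity when no ``updated_entities`` are given):
.. ipython:: python
es.add_last_time_indexes()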
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if not all children are explored yet, skip this entity for now
if not set([e.id for e in child_entities]).issubset(explored):
# It is possible that a child entity was not explicitly
# provided in updated_entities and never made it onto the
# queue. (When updated_entities is empty we load all entities
# onto the queue, so this logic is not needed in that case.)
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
# updated last time from all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
lti_df = pd.Series(dtype='datetime64[ns]')
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
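Example (a minimal sketch using the entityset built above):
.. ipython:: python
es.add_interesting_values(max_values=3, verbose=True)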
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
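Example (illustrative; requires the graphviz package and a working
graphviz backend):
.. ipython:: python
es.plot(to_file="entityset.png")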
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
variables_string = '\\l'.join([var.id + ' : ' + var.type_string
for var in entity.variables])
nrows = entity.shape[0]
label = '{%s (%d row%s)|%s\\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string)
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
# Display the key only once if it is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph
|
iterator | If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrences of the terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships. | from __future__ import unicode_literals
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.loading import get_models
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.models import get_model
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
class PublishedManager(Manager):
"""
Provides filter for restricting items returned by status and
publish date when the given user is not a staff member.
"""
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
"""
In ``SearchableQuerySet`` and ``SearchableManager``, search fields
can either be a sequence, or a dict of fields mapped to weights.
This function converts sequences to a dict mapped to even weights,
so that we're consistently dealing with a dict of fields mapped to
weights, eg: ("title", "content") -> {"title": 1, "content": 1}
"""
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
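For example (an illustrative query; ``BlogPost`` is an assumed model
whose manager returns this queryset):
BlogPost.objects.search('"open source" +django -flask')
treats "open source" as an exact phrase, requires the term "django",
and excludes results containing "flask".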
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
# MASKED: iterator function (lines 161-192)
class SearchableManager(Manager):
"""
Manager providing a chainable queryset.
Adapted from http://www.djangosnippets.org/snippets/562/
The ``search`` method supports spanning across models that
subclass the model being used to search.
"""
def __init__(self, *args, **kwargs):
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableManager, self).__init__(*args, **kwargs)
def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
built up from the inheritance chain.
Finally if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
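For example (illustrative values), a model may declare weighted
fields:
search_fields = {"title": 10, "content": 1}
or an unweighted sequence, which is normalised to even weights:
search_fields = ("title", "content")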
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields
def get_queryset(self):
search_fields = self.get_search_fields()
return SearchableQuerySet(self.model, search_fields=search_fields)
def contribute_to_class(self, model, name):
"""
Django 1.5 explicitly prevents managers being accessed from
abstract classes, which is behaviour the search API has relied
on for years. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self))
def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in get_models() if issubclass(m, self.model)]
parents = reduce(ior, [m._meta.get_parent_list() for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in get_models():
# Model is actually a subclass of what we're
# searching (eg Displayable).
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
# Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
"""
Extends Django's site manager to first look up the site by the ID
stored in the request, then the session, then the domain for the
current request (accessible via threadlocals in
``mezzanine.core.request``), then the environment variable
``MEZZANINE_SITE_ID`` (which can be used by management commands
with the ``--site`` arg), finally falling back to
``settings.SITE_ID`` if none of those match a site.
"""
def __init__(self, field_name=None, *args, **kwargs):
super(DjangoCSM, self).__init__(*args, **kwargs)
self.__field_name = field_name
self.__is_validated = False
def get_queryset(self):
if not self.__is_validated:
try:
# Django <= 1.6
self._validate_field_name()
except AttributeError:
# Django >= 1.7: will populate "self.__field_name".
self._get_field_name()
lookup = {self.__field_name + "__id__exact": current_site_id()}
return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
SearchableManager):
"""
Manually combines ``CurrentSiteManager``, ``PublishedManager``
and ``SearchableManager`` for the ``Displayable`` model.
"""
def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``mezzanine.core.sitemaps``.
"""
home = self.model(title=_("Home"))
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items | def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrences of the terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results | 161 | 192 | from __future__ import unicode_literals
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.loading import get_models
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.models import get_model
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
class PublishedManager(Manager):
"""
Provides filter for restricting items returned by status and
publish date when the given user is not a staff member.
"""
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
"""
In ``SearchableQuerySet`` and ``SearchableManager``, search fields
can either be a sequence, or a dict of fields mapped to weights.
This function converts sequences to a dict mapped to even weights,
so that we're consistently dealing with a dict of fields mapped to
weights, eg: ("title", "content") -> {"title": 1, "content": 1}
"""
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrences of the terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results
class SearchableManager(Manager):
"""
Manager providing a chainable queryset.
Adapted from http://www.djangosnippets.org/snippets/562/
The ``search`` method supports spanning across models that
subclass the model being used to search.
"""
def __init__(self, *args, **kwargs):
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableManager, self).__init__(*args, **kwargs)
def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
built up from the inheritance chain.
Finally if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields
def get_queryset(self):
search_fields = self.get_search_fields()
return SearchableQuerySet(self.model, search_fields=search_fields)
def contribute_to_class(self, model, name):
"""
Django 1.5 explicitly prevents managers being accessed from
abstract classes, which is behaviour the search API has relied
on for years. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self))
def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in get_models() if issubclass(m, self.model)]
parents = reduce(ior, [m._meta.get_parent_list() for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in get_models():
# Model is actually a subclass of what we're
# searching (eg Displayable).
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
# Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
"""
Extends Django's site manager to first look up the site by the ID
stored in the request, then the session, then the domain for the
current request (accessible via threadlocals in
``mezzanine.core.request``), then the environment variable
``MEZZANINE_SITE_ID`` (which can be used by management commands
with the ``--site`` arg), finally falling back to
``settings.SITE_ID`` if none of those match a site.
"""
def __init__(self, field_name=None, *args, **kwargs):
super(DjangoCSM, self).__init__(*args, **kwargs)
self.__field_name = field_name
self.__is_validated = False
def get_queryset(self):
if not self.__is_validated:
try:
# Django <= 1.6
self._validate_field_name()
except AttributeError:
# Django >= 1.7: will populate "self.__field_name".
self._get_field_name()
lookup = {self.__field_name + "__id__exact": current_site_id()}
return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
SearchableManager):
"""
Manually combines ``CurrentSiteManager``, ``PublishedManager``
and ``SearchableManager`` for the ``Displayable`` model.
"""
def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``mezzanine.core.sitemaps``.
"""
home = self.model(title=_("Home"))
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items
|
prep | Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version. | #!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
"""A context manager that temporary changes the working directory.
"""
olddir = os.getcwd()
os.chdir(d)
yield
os.chdir(olddir)
@click.group()
def release():
pass
# Locations (filenames and patterns) of the version number.
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
"""Update the version number in setup.py, docs config, changelog,
and root module.
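For example (an illustrative version number), ``bump_version('1.4.9')``
rewrites the version strings listed in ``VERSION_LOCS`` and prepends
a '1.4.9 (in development)' stub section to the changelog.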
"""
version_parts = [int(p) for p in version.split('.')]
assert len(version_parts) == 3, "invalid version number"
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
# Replace the version each place where it lives.
for filename, locations in VERSION_LOCS:
# Read and transform the file.
out_lines = []
with open(filename) as f:
found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
if match:
# Check that this version is actually newer.
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert version_parts > old_parts, \
"version must be newer than {}".format(
old_version
)
# Insert the new version.
out_lines.append(template.format(
version=version,
major=major,
minor=minor,
) + '\n')
found = True
break
else:
# Normal line.
out_lines.append(line)
if not found:
print(f"No pattern found in {filename}")
# Write the file back.
with open(filename, 'w') as f:
f.write(''.join(out_lines))
# Generate bits to insert into changelog.
header_line = f'{version} (in development)'
header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
header += 'Changelog goes here!\n'
# Insert into the right place.
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n') # First blank line.
contents = contents[:location] + header + contents[location:]
# Write back.
with open(CHANGELOG, 'w') as f:
f.write(contents)
@release.command()
@click.argument('version')
def bump(version):
"""Bump the version number.
"""
bump_version(version)
def get_latest_changelog():
"""Extract the first section of the changelog.
"""
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
# Section boundary. Start or end.
if started:
# Remove last line, which is the header of the next
# section.
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
def rst2md(text):
"""Use Pandoc to convert text from ReST to Markdown.
"""
pandoc = subprocess.Popen(
['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
# Fix up odd spacing in lists.
return re.sub(r'^-\s+', '- ', md, flags=re.M)
def changelog_as_markdown():
"""Get the latest changelog entry as hacked up Markdown.
"""
rst = get_latest_changelog()
# Replace plugin links with plugin names.
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
# References with text.
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
# Command links with command names.
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
# Bug numbers.
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
# Users.
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
# Convert with Pandoc.
md = rst2md(rst)
# Restore escaped issue numbers.
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
@release.command()
def changelog():
"""Get the most recent version's changelog as Markdown.
"""
print(changelog_as_markdown())
def get_version(index=0):
"""Read the current version from the changelog.
"""
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search(r'^\d+\.\d+\.\d+', line)
if match:
if cur_index == index:
return match.group(0)
else:
cur_index += 1
@release.command()
def version():
"""Display the current version.
"""
print(get_version())
@release.command()
def datestamp():
"""Enter today's date as the release date in the changelog.
"""
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
# The header line.
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
# This is the line after the header. Rewrite the dashes.
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
# MASKED: prep function (lines 267-295)
@release.command()
def publish():
"""Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI.
"""
version = get_version(1)
# Push to GitHub.
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
# Upload to PyPI.
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path])
@release.command()
def ghrelease():
"""Create a GitHub release using the `github-release` command-line
tool.
Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory.
"""
version = get_version(1)
tag = 'v' + version
# Load the changelog.
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
# Create the release.
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
# Attach the release tarball.
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
if __name__ == '__main__':
release() | @release.command()
def prep():
"""Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version.
"""
cur_version = get_version()
# Tag.
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
# Build.
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
# Generate Markdown changelog.
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
# Version number bump.
# FIXME It should be possible to specify this as an argument.
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version) | 267 | 295 | #!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
"""A context manager that temporary changes the working directory.
"""
olddir = os.getcwd()
os.chdir(d)
yield
os.chdir(olddir)
@click.group()
def release():
pass
# Locations (filenames and patterns) of the version number.
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
"""Update the version number in setup.py, docs config, changelog,
and root module.
"""
version_parts = [int(p) for p in version.split('.')]
assert len(version_parts) == 3, "invalid version number"
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
# Replace the version each place where it lives.
for filename, locations in VERSION_LOCS:
# Read and transform the file.
out_lines = []
with open(filename) as f:
found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
if match:
# Check that this version is actually newer.
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert version_parts > old_parts, \
"version must be newer than {}".format(
old_version
)
# Insert the new version.
out_lines.append(template.format(
version=version,
major=major,
minor=minor,
) + '\n')
found = True
break
else:
# Normal line.
out_lines.append(line)
if not found:
print(f"No pattern found in {filename}")
# Write the file back.
with open(filename, 'w') as f:
f.write(''.join(out_lines))
# Generate bits to insert into changelog.
header_line = f'{version} (in development)'
header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
header += 'Changelog goes here!\n'
# Insert into the right place.
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n') # First blank line.
contents = contents[:location] + header + contents[location:]
# Write back.
with open(CHANGELOG, 'w') as f:
f.write(contents)
@release.command()
@click.argument('version')
def bump(version):
"""Bump the version number.
"""
bump_version(version)
def get_latest_changelog():
"""Extract the first section of the changelog.
"""
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
# Section boundary. Start or end.
if started:
# Remove last line, which is the header of the next
# section.
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
def rst2md(text):
"""Use Pandoc to convert text from ReST to Markdown.
"""
pandoc = subprocess.Popen(
['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
# Fix up odd spacing in lists.
return re.sub(r'^- ', '- ', md, flags=re.M)
def changelog_as_markdown():
"""Get the latest changelog entry as hacked up Markdown.
"""
rst = get_latest_changelog()
# Replace plugin links with plugin names.
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
# References with text.
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
# Command links with command names.
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
# Bug numbers.
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
# Users.
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
# Convert with Pandoc.
md = rst2md(rst)
# Restore escaped issue numbers.
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
@release.command()
def changelog():
"""Get the most recent version's changelog as Markdown.
"""
print(changelog_as_markdown())
def get_version(index=0):
"""Read the current version from the changelog.
"""
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search(r'^\d+\.\d+\.\d+', line)
if match:
if cur_index == index:
return match.group(0)
else:
cur_index += 1
@release.command()
def version():
"""Display the current version.
"""
print(get_version())
@release.command()
def datestamp():
"""Enter today's date as the release date in the changelog.
"""
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
# The header line.
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
# This is the line after the header. Rewrite the dashes.
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
@release.command()
def prep():
"""Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version.
"""
cur_version = get_version()
# Tag.
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
# Build.
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
# Generate Markdown changelog.
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
# Version number bump.
# FIXME It should be possible to specify this as an argument.
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version)
@release.command()
def publish():
"""Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI.
"""
version = get_version(1)
# Push to GitHub.
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
# Upload to PyPI.
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path])
@release.command()
def ghrelease():
"""Create a GitHub release using the `github-release` command-line
tool.
Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory.
"""
version = get_version(1)
tag = 'v' + version
# Load the changelog.
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
# Create the release.
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
# Attach the release tarball.
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
if __name__ == '__main__':
release()
|
publish | Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI. | #!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
"""A context manager that temporary changes the working directory.
"""
    olddir = os.getcwd()
    os.chdir(d)
    try:
        yield
    finally:
        # Restore the previous directory even if the body raises.
        os.chdir(olddir)
@click.group()
def release():
pass
# Locations (filenames and patterns) of the version number.
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
"""Update the version number in setup.py, docs config, changelog,
and root module.
"""
version_parts = [int(p) for p in version.split('.')]
assert len(version_parts) == 3, "invalid version number"
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
# Replace the version each place where it lives.
for filename, locations in VERSION_LOCS:
# Read and transform the file.
out_lines = []
with open(filename) as f:
found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
if match:
# Check that this version is actually newer.
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert version_parts > old_parts, \
"version must be newer than {}".format(
old_version
)
# Insert the new version.
out_lines.append(template.format(
version=version,
major=major,
minor=minor,
) + '\n')
found = True
break
else:
# Normal line.
out_lines.append(line)
if not found:
print(f"No pattern found in {filename}")
# Write the file back.
with open(filename, 'w') as f:
f.write(''.join(out_lines))
# Generate bits to insert into changelog.
header_line = f'{version} (in development)'
header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
header += 'Changelog goes here!\n'
# Insert into the right place.
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n') # First blank line.
contents = contents[:location] + header + contents[location:]
# Write back.
with open(CHANGELOG, 'w') as f:
f.write(contents)
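    # Worked example (illustrative): for version='1.4.9', version_parts is
    # [1, 4, 9], so minor is '1.4' (used for the docs `version` field) and
    # major is '1'; the changelog gains a '1.4.9 (in development)' header.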
@release.command()
@click.argument('version')
def bump(version):
"""Bump the version number.
"""
bump_version(version)
def get_latest_changelog():
"""Extract the first section of the changelog.
"""
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
# Section boundary. Start or end.
if started:
# Remove last line, which is the header of the next
# section.
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
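# A sketch (not from the original script) of the changelog layout the parser
# above assumes: each release header is underlined with dashes, so the first
# dashed line opens the latest section and the next one closes it:
#
#   1.4.9 (in development)
#   ----------------------
#
#   Changelog goes here!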
def rst2md(text):
"""Use Pandoc to convert text from ReST to Markdown.
"""
pandoc = subprocess.Popen(
['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
    # Fix up odd spacing in lists: collapse extra spaces after the bullet.
    return re.sub(r'^- +', '- ', md, flags=re.M)
def changelog_as_markdown():
"""Get the latest changelog entry as hacked up Markdown.
"""
rst = get_latest_changelog()
# Replace plugin links with plugin names.
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
# References with text.
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
# Command links with command names.
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
# Bug numbers.
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
# Users.
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
# Convert with Pandoc.
md = rst2md(rst)
# Restore escaped issue numbers.
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
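    # Worked example (illustrative): the ReST fragment
    #   'Fix the :doc:`/plugins/web` plugin (:bug:`2677`, thanks :user:`sampsyo`).'
    # comes out of the substitutions above plus Pandoc as roughly
    #   'Fix the `web` plugin (#2677, thanks @sampsyo).'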
@release.command()
def changelog():
"""Get the most recent version's changelog as Markdown.
"""
print(changelog_as_markdown())
def get_version(index=0):
"""Read the current version from the changelog.
"""
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search(r'^\d+\.\d+\.\d+', line)
if match:
if cur_index == index:
return match.group(0)
else:
cur_index += 1
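# Note (illustrative): after `prep` bumps the changelog, index 0 is the new
# '(in development)' entry, which is why `publish` and `ghrelease` call
# get_version(1) to find the version that was just tagged.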
@release.command()
def version():
"""Display the current version.
"""
print(get_version())
@release.command()
def datestamp():
"""Enter today's date as the release date in the changelog.
"""
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
# The header line.
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
# This is the line after the header. Rewrite the dashes.
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
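# Example (illustrative): the header '1.4.9 (in development)' becomes
# '1.4.9 (May 2, 2018)', and the dashed underline on the following line is
# redrawn to match the new header length.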
@release.command()
def prep():
"""Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version.
"""
cur_version = get_version()
# Tag.
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
# Build.
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
# Generate Markdown changelog.
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
# Version number bump.
# FIXME It should be possible to specify this as an argument.
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version)
# MASKED: publish function (lines 298-314)
@release.command()
def ghrelease():
"""Create a GitHub release using the `github-release` command-line
tool.
Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory.
"""
version = get_version(1)
tag = 'v' + version
# Load the changelog.
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
# Create the release.
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
# Attach the release tarball.
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
if __name__ == '__main__':
release() | @release.command()
def publish():
"""Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI.
"""
version = get_version(1)
# Push to GitHub.
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
# Upload to PyPI.
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
    subprocess.check_call(['twine', 'upload', path]) | 298 | 314 |
|
getEtiqueta | Gets the name of the captured label.
Args:
linea (str): Line in which to look for the label
Returns:
str: Returns the name of the label | import re
from Error import Error4,Error6,Error9
from DataBase import BaseDatos,BdRow
from .precompilada import precompilada
from typing import Pattern
# MASKED: getEtiqueta function (lines 7-23)
def calcularEtiqueta(sustraendo:str,minuendo:str)-> str:
    """Computes the difference between two PCs in hexadecimal:
    sustraendo - minuendo.
    - On error returns 'e10' (the operand is too large)
    Args:
        sustraendo (str): e.g. '0x7'
        minuendo (str): e.g. '0x1'
    Returns:
        str: e.g. '0x06'
    """
    print(sustraendo)
    print(minuendo)
    sustraendo=int(sustraendo,16)
    minuendo=int(minuendo,16)
    resultado:int= sustraendo-minuendo
    print(resultado)
    if resultado < -128 or 127 < resultado:
        return 'e10'  # E10: the relative jump is too far (a signed byte covers -128..127)
    # If it is negative, encode it in two's complement.
    elif resultado<0:
        return convertirA2Hex(resultado)
    # If it is positive, plain hex is enough.
    else:
        return hex(resultado)
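    # Worked example (illustrative): calcularEtiqueta('0x3', '0x7') computes
    # 0x3 - 0x7 = -4; the result is negative, so it is returned in two's
    # complement via convertirA2Hex(-4) -> '0xfc'.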
def bindigits(n:int, bits:int)->str:
    """Converts a number to binary: two's complement if negative, plain binary if positive.
    Args:
        n (int): e.g. 1
        bits (int): e.g. 3
    Returns:
        str: e.g. '001'
    """
    s = bin(n & int("1"*bits, 2))[2:]
    return ("{0:0>%s}" % (bits)).format(s)
def convertirA2Hex(numero:int)-> str:
    """Converts a decimal number to hexadecimal.
    - If the number is negative it is converted to two's complement.
    Args:
        numero (int): Decimal number to convert, e.g. 7
    Returns:
        str: e.g. 0x07
    """
    # How many bits the hexadecimal number occupies: one nibble per hex digit
    # of the magnitude, padded to a whole byte so the sign bit lands correctly.
    cuantosBits=(len(hex(abs(numero)))-2)*4
    if cuantosBits % 8:
        cuantosBits += 4
    # Number converted to binary (two's complement when negative).
    binario=bindigits(numero,cuantosBits)
    return hex(int(binario, 2))
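    # Example (illustrative): convertirA2Hex(-4) pads to 8 bits, so
    # bindigits(-4, 8) -> '11111100' and the result is hex(252) -> '0xfc'.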
def precompilarPasada1(numLinea:int,modo:str,linea:str,pc: str)->precompilada:
    # Global variables
    # Look for the mnemonic followed by the label.
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
    busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the mnemonic and the label -------------------------------
    mnemonico=busqueda.group(1)
    etiqueta=busqueda.group(2)
    # Query the database -------------------------------
    consultaBd:BdRow = BaseDatos.bdSearch(mnemonico,6)
    # Current PC = pc + bytes used.
    pcActual=hex(int(pc,16) +2) # +2 because all relative instructions take 2 bytes
    # Direct data --------------------------------------
    lineaPrecompilada=precompilada(numLinea,modo,pcActual,consultaBd.opcode,etiqueta,consultaBd.byte)
    # Derived data -----------------------------------
    lineaPrecompilada.bytesOcupados=consultaBd.byte
    return lineaPrecompilada
def precompilarPasada2(lineaPrecompilada:precompilada,pcEtiqueta:str)->precompilada:
    # Current PC (the +2 for the 2-byte instruction was already added in pass 1).
    pcActual=hex(int(lineaPrecompilada.pcActual,16) )
    lineaPrecompilada1:precompilada
    # Compute the operand.
    operandoPrecompilado=calcularEtiqueta(pcEtiqueta,pcActual)
    # Check that the relative jump is not too far.
    if operandoPrecompilado=='e10': # error case: the jump is too far
        lineaPrecompilada1=precompilada(0,'','','','',0)
        lineaPrecompilada1.error='e10'
    else:
        operandoPrecompilado=operandoPrecompilado[2:]
        # Make a copy.
        lineaPrecompilada1=precompilada(lineaPrecompilada.numLinea,lineaPrecompilada.modo,hex(int(lineaPrecompilada.pcActual,16)-2),lineaPrecompilada.opcode,operandoPrecompilado,lineaPrecompilada.byte)
    print(operandoPrecompilado)
    return lineaPrecompilada1
#return lineaPrecompilada1 | def getEtiqueta(linea:str)->str:
"""Obtiene el nombre de la captura
Args:
linea (str): Linea donde se va a buscar la etiqueta
Returns:
str: Regresa el nombre de la etiqueta
"""
# Buscamos el mnemonico
pattern='\s+([a-z]{1,5})\s+([a-z]{1,24})'
busqueda=re.search(pattern,linea,re.IGNORECASE)
# Obtenemos el mnemonico-------------------------------
etiqueta =busqueda.group(2)
    return etiqueta | 7 | 23 |
calcularEtiqueta | Computes the difference between two PCs in hexadecimal:
sustraendo - minuendo
- On error returns 'e10' (the operand is too large)
Args:
sustraendo (str): e.g. '0x7'
minuendo (str): e.g. '0x1'
Returns:
str: e.g. '0x06' | import re
from Error import Error4,Error6,Error9
from DataBase import BaseDatos,BdRow
from .precompilada import precompilada
from typing import Pattern
def getEtiqueta(linea:str)->str:
    """Gets the name of the captured label.
    Args:
        linea (str): Line in which to look for the label
    Returns:
        str: Returns the name of the label
    """
    # Look for the mnemonic followed by the label.
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
    busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the label (second capture group) -------------------------------
    etiqueta=busqueda.group(2)
    return etiqueta
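    # Example (illustrative): for the source line '    bra inicio', group(1)
    # captures the mnemonic 'bra' and group(2) the label 'inicio'.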
# MASKED: calcularEtiqueta function (lines 26-55)
def bindigits(n:int, bits:int)->str:
    """Converts a number to binary: two's complement if negative, plain binary if positive.
    Args:
        n (int): e.g. 1
        bits (int): e.g. 3
    Returns:
        str: e.g. '001'
    """
    s = bin(n & int("1"*bits, 2))[2:]
    return ("{0:0>%s}" % (bits)).format(s)
def convertirA2Hex(numero:int)-> str:
    """Converts a decimal number to hexadecimal.
    - If the number is negative it is converted to two's complement.
    Args:
        numero (int): Decimal number to convert, e.g. 7
    Returns:
        str: e.g. 0x07
    """
    # How many bits the hexadecimal number occupies: one nibble per hex digit
    # of the magnitude, padded to a whole byte so the sign bit lands correctly.
    cuantosBits=(len(hex(abs(numero)))-2)*4
    if cuantosBits % 8:
        cuantosBits += 4
    # Number converted to binary (two's complement when negative).
    binario=bindigits(numero,cuantosBits)
    return hex(int(binario, 2))
def precompilarPasada1(numLinea:int,modo:str,linea:str,pc: str)->precompilada:
    # Global variables
    # Look for the mnemonic followed by the label.
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
    busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the mnemonic and the label -------------------------------
    mnemonico=busqueda.group(1)
    etiqueta=busqueda.group(2)
    # Query the database -------------------------------
    consultaBd:BdRow = BaseDatos.bdSearch(mnemonico,6)
    # Current PC = pc + bytes used.
    pcActual=hex(int(pc,16) +2) # +2 because all relative instructions take 2 bytes
    # Direct data --------------------------------------
    lineaPrecompilada=precompilada(numLinea,modo,pcActual,consultaBd.opcode,etiqueta,consultaBd.byte)
    # Derived data -----------------------------------
    lineaPrecompilada.bytesOcupados=consultaBd.byte
    return lineaPrecompilada
def precompilarPasada2(lineaPrecompilada:precompilada,pcEtiqueta:str)->precompilada:
    # Current PC (the +2 for the 2-byte instruction was already added in pass 1).
    pcActual=hex(int(lineaPrecompilada.pcActual,16) )
    lineaPrecompilada1:precompilada
    # Compute the operand.
    operandoPrecompilado=calcularEtiqueta(pcEtiqueta,pcActual)
    # Check that the relative jump is not too far.
    if operandoPrecompilado=='e10': # error case: the jump is too far
        lineaPrecompilada1=precompilada(0,'','','','',0)
        lineaPrecompilada1.error='e10'
    else:
        operandoPrecompilado=operandoPrecompilado[2:]
        # Make a copy.
        lineaPrecompilada1=precompilada(lineaPrecompilada.numLinea,lineaPrecompilada.modo,hex(int(lineaPrecompilada.pcActual,16)-2),lineaPrecompilada.opcode,operandoPrecompilado,lineaPrecompilada.byte)
    print(operandoPrecompilado)
    return lineaPrecompilada1
#return lineaPrecompilada1 | def calcularEtiqueta(sustraendo:str,minuendo:str)-> str:
"""Resta la diferencia entre dos PC en hexadecimal
sustraendo - minuendo
- Si
- Sustraendo - minuendo
- En caso de error regresa 'e10' operando muy grande
Args:
sustraendo (str): Ejemplo '0x7'
minuendo (str): Ejemplo '0x1'
Returns:
str: Ejemplo '0x06'
"""
print(sustraendo)
print(minuendo)
sustraendo=int(sustraendo,16)
minuendo=int(minuendo,16)
resultado:int= sustraendo-minuendo
print(resultado)
if resultado <-127 or 128<resultado:
return 'e10' #E10 el salto relativo es muy lejano
# Si es negativa
elif resultado<0:
return convertirA2Hex(resultado)
# si es positiva
else:
        return hex(resultado) | 26 | 55 |
convertirA2Hex | Converts a decimal number to hexadecimal
- If the number is negative it is converted to two's complement
Args:
numero (int): Decimal number to convert, e.g. 7
Returns:
str: e.g. 0x07 | import re
from Error import Error4,Error6,Error9
from DataBase import BaseDatos,BdRow
from .precompilada import precompilada
from typing import Pattern
def getEtiqueta(linea:str)->str:
    """Gets the name of the captured label.
    Args:
        linea (str): Line in which to look for the label
    Returns:
        str: Returns the name of the label
    """
    # Look for the mnemonic followed by the label.
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
    busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the label (second capture group) -------------------------------
    etiqueta=busqueda.group(2)
    return etiqueta
def calcularEtiqueta(sustraendo:str,minuendo:str)-> str:
    """Computes the difference between two PCs in hexadecimal:
    sustraendo - minuendo.
    - On error returns 'e10' (the operand is too large)
    Args:
        sustraendo (str): e.g. '0x7'
        minuendo (str): e.g. '0x1'
    Returns:
        str: e.g. '0x06'
    """
    print(sustraendo)
    print(minuendo)
    sustraendo=int(sustraendo,16)
    minuendo=int(minuendo,16)
    resultado:int= sustraendo-minuendo
    print(resultado)
    if resultado < -128 or 127 < resultado:
        return 'e10'  # E10: the relative jump is too far (a signed byte covers -128..127)
    # If it is negative, encode it in two's complement.
    elif resultado<0:
        return convertirA2Hex(resultado)
    # If it is positive, plain hex is enough.
    else:
        return hex(resultado)
def bindigits(n:int, bits:int)->str:
    """Converts a number to binary: two's complement if negative, plain binary if positive.
    Args:
        n (int): e.g. 1
        bits (int): e.g. 3
    Returns:
        str: e.g. '001'
    """
    s = bin(n & int("1"*bits, 2))[2:]
    return ("{0:0>%s}" % (bits)).format(s)
# MASKED: convertirA2Hex function (lines 72-89)
def precompilarPasada1(numLinea:int,modo:str,linea:str,pc: str)->precompilada:
    # Global variables
    # Look for the mnemonic followed by the label.
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
    busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the mnemonic and the label -------------------------------
    mnemonico=busqueda.group(1)
    etiqueta=busqueda.group(2)
    # Query the database -------------------------------
    consultaBd:BdRow = BaseDatos.bdSearch(mnemonico,6)
    # Current PC = pc + bytes used.
    pcActual=hex(int(pc,16) +2) # +2 because all relative instructions take 2 bytes
    # Direct data --------------------------------------
    lineaPrecompilada=precompilada(numLinea,modo,pcActual,consultaBd.opcode,etiqueta,consultaBd.byte)
    # Derived data -----------------------------------
    lineaPrecompilada.bytesOcupados=consultaBd.byte
    return lineaPrecompilada
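    # Illustrative flow (inferred from the code): pass 1 stores the opcode and
    # the still-unresolved label, with pcActual already pointing past the
    # 2-byte instruction; pass 2 later swaps the label for its relative offset.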
def precompilarPasada2(lineaPrecompilada:precompilada,pcEtiqueta:str)->precompilada:
    # Current PC (the +2 for the 2-byte instruction was already added in pass 1).
    pcActual=hex(int(lineaPrecompilada.pcActual,16) )
    lineaPrecompilada1:precompilada
    # Compute the operand.
    operandoPrecompilado=calcularEtiqueta(pcEtiqueta,pcActual)
    # Check that the relative jump is not too far.
    if operandoPrecompilado=='e10': # error case: the jump is too far
        lineaPrecompilada1=precompilada(0,'','','','',0)
        lineaPrecompilada1.error='e10'
    else:
        operandoPrecompilado=operandoPrecompilado[2:]
        # Make a copy.
        lineaPrecompilada1=precompilada(lineaPrecompilada.numLinea,lineaPrecompilada.modo,hex(int(lineaPrecompilada.pcActual,16)-2),lineaPrecompilada.opcode,operandoPrecompilado,lineaPrecompilada.byte)
    print(operandoPrecompilado)
    return lineaPrecompilada1
#return lineaPrecompilada1 | def convertirA2Hex(numero:int)-> str:
"""Convierte un numero decimal a hexadecimal
- Si el número es decimal lo convierte a complemento A2
Args:
numero (int): Número decimal que se quiere convertir Eg. 07
Returns:
str: Eg. 0x07
"""
# cuantos bits ocupa el número hexadecimal
cuantosBits=(len(hex(numero))-2) *4 # el -2 es 0x, el 4 es porque 1 hex equivale a 4 bits
#numero convertido a binario
binario=bindigits(numero,cuantosBits)
    return hex(int(binario, 2)) | 72 | 89 |
award_points | Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants. | import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Sum
import commonware.log
import waffle
import amo
import mkt.constants.comm as comm
from amo.utils import cache_ns_key
from mkt.comm.utils import create_comm_note
from mkt.site.mail import send_mail_jinja
from mkt.site.models import ManagerBase, ModelBase, skip_cache
from mkt.tags.models import Tag
from mkt.translations.fields import save_signal, TranslatedField
from mkt.users.models import UserProfile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
user_log = commonware.log.getLogger('z.users')
QUEUE_TARAKO = 'tarako'
class CannedResponse(ModelBase):
name = TranslatedField()
response = TranslatedField(short=False)
sort_group = models.CharField(max_length=255)
class Meta:
db_table = 'cannedresponses'
def __unicode__(self):
return unicode(self.name)
models.signals.pre_save.connect(save_signal, sender=CannedResponse,
dispatch_uid='cannedresponses_translations')
class EditorSubscription(ModelBase):
user = models.ForeignKey(UserProfile)
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'editor_subscriptions'
class ReviewerScore(ModelBase):
user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
addon = models.ForeignKey(Webapp, blank=True, null=True, related_name='+')
score = models.SmallIntegerField()
# For automated point rewards.
note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
default=0)
# For manual point rewards with a note.
note = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'reviewer_scores'
ordering = ('-created',)
@classmethod
def get_key(cls, key=None, invalidate=False):
namespace = 'riscore'
if not key: # Assuming we're invalidating the namespace.
cache_ns_key(namespace, invalidate)
return
else:
# Using cache_ns_key so each cache val is invalidated together.
ns_key = cache_ns_key(namespace, invalidate)
return '%s:%s' % (ns_key, key)
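        # Illustrative example (the exact layout depends on cache_ns_key and is
        # assumed here): if the namespace key resolves to 'riscore:ns:42', then
        # get_key('get_total:7') -> 'riscore:ns:42:get_total:7'; bumping the
        # namespace counter invalidates all derived keys at once.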
@classmethod
def get_event(cls, addon, status, **kwargs):
"""Return the review event type constant.
This is determined by the app type and the queue the addon is
currently in (which is determined from the status).
Note: We're not using addon.status because this is called after the
status has been updated by the reviewer action.
"""
if addon.is_packaged:
if status in amo.WEBAPPS_APPROVED_STATUSES:
return amo.REVIEWED_WEBAPP_UPDATE
else: # If it's not PUBLIC, assume it's a new submission.
return amo.REVIEWED_WEBAPP_PACKAGED
else: # It's a hosted app.
in_rereview = kwargs.pop('in_rereview', False)
if status in amo.WEBAPPS_APPROVED_STATUSES and in_rereview:
return amo.REVIEWED_WEBAPP_REREVIEW
else:
return amo.REVIEWED_WEBAPP_HOSTED
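        # Hypothetical usage sketch (the status constant is an assumption):
        #   ReviewerScore.get_event(app, amo.STATUS_PUBLIC, in_rereview=True)
        # returns amo.REVIEWED_WEBAPP_REREVIEW for a hosted app in re-review.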
# MASKED: award_points function (lines 100-118)
@classmethod
def award_moderation_points(cls, user, addon, review_id):
"""Awards points to user based on moderated review."""
event = amo.REVIEWED_APP_REVIEW
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = (ReviewerScore.objects.no_cache().filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
val = list(val[:limit])
cache.set(key, val, None)
return val
@classmethod
def get_performance(cls, user):
"""Returns sum of reviewer points."""
key = cls.get_key('get_performance:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val
@classmethod
def get_performance_since(cls, user, since):
"""
Returns sum of reviewer points since the given datetime.
"""
key = cls.get_key('get_performance:%s:%s' % (user.id, since.isoformat()))
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s AND
`reviewer_scores`.`created` >= %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val
@classmethod
def _leaderboard_query(cls, since=None, types=None):
"""
Returns common SQL to leaderboard calls.
"""
query = (cls.objects
.values_list('user__id', 'user__display_name')
.annotate(total=Sum('score'))
.exclude(user__groups__name__in=('No Reviewer Incentives',
'Staff', 'Admins'))
.order_by('-total'))
if since is not None:
query = query.filter(created__gte=since)
if types is not None:
query = query.filter(note_key__in=types)
return query
@classmethod
def get_leaderboards(cls, user, days=7, types=None):
"""Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
'leader_near': [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3.
"""
key = cls.get_key('get_leaderboards:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
week_ago = datetime.date.today() - datetime.timedelta(days=days)
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types)
scores = []
user_rank = 0
in_leaderboard = False
for rank, row in enumerate(query, 1):
user_id, name, total = row
scores.append({
'user_id': user_id,
'name': name,
'rank': rank,
'total': int(total),
})
if user_id == user.id:
user_rank = rank
in_leaderboard = True
if not in_leaderboard:
leader_top = scores[:5]
else:
if user_rank <= 5: # User is in top 5, show top 5.
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass # User is last on the leaderboard.
val = {
'leader_top': leader_top,
'leader_near': leader_near,
'user_rank': user_rank,
}
cache.set(key, val, None)
return val
@classmethod
def all_users_by_score(cls):
"""
Returns reviewers ordered by highest total points first.
"""
query = cls._leaderboard_query()
scores = []
for row in query:
user_id, name, total = row
user_level = len(amo.REVIEWED_LEVELS) - 1
for i, level in enumerate(amo.REVIEWED_LEVELS):
if total < level['points']:
user_level = i - 1
break
# Only show level if it changes.
if user_level < 0:
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({
'user_id': user_id,
'name': name,
'total': int(total),
'level': level,
})
prev = None
for score in reversed(scores):
if score['level'] == prev:
score['level'] = ''
else:
prev = score['level']
return scores
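        # Note: the reversed() walk above blanks repeated levels from the
        # bottom up, so in the descending table each level label appears only
        # on the lowest-ranked reviewer of its band.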
class EscalationQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'escalation_queue'
class RereviewQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'rereview_queue'
@classmethod
def flag(cls, addon, event, message=None):
cls.objects.get_or_create(addon=addon)
if message:
amo.log(event, addon, addon.current_version,
details={'comments': message})
else:
amo.log(event, addon, addon.current_version)
# TODO: if we ever get rid of ActivityLog for reviewer notes, replace
# all flag calls to use the comm constant and not have to use
# ACTION_MAP.
create_comm_note(addon, addon.current_version, None, message,
note_type=comm.ACTION_MAP(event))
def send_tarako_mail(review):
if not waffle.switch_is_active('comm-dashboard'):
send_mail_jinja(
'Low-memory devices review {passed}'.format(
passed='passed' if review.passed else 'failed'),
'reviewers/emails/tarako_review_complete.txt',
{'review': review},
recipient_list=[a.email for a in review.app.authors.all()],
from_email=settings.MKT_REVIEWERS_EMAIL)
def tarako_passed(review):
"""Add the tarako tag to the app."""
tag = Tag(tag_text='tarako')
tag.save_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
def tarako_failed(review):
"""Remove the tarako tag from the app."""
tag = Tag(tag_text='tarako')
tag.remove_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
class AdditionalReviewManager(ManagerBase):
def unreviewed(self, queue, and_approved=False):
query = {
'passed': None,
'queue': queue,
}
if and_approved:
query['app__status__in'] = amo.WEBAPPS_APPROVED_STATUSES
return self.get_queryset().no_cache().filter(**query)
def latest_for_queue(self, queue):
try:
return self.get_queryset().filter(queue=queue).latest()
except AdditionalReview.DoesNotExist:
return None
class AdditionalReview(ModelBase):
app = models.ForeignKey(Webapp)
queue = models.CharField(max_length=30)
passed = models.NullBooleanField()
review_completed = models.DateTimeField(null=True)
comment = models.CharField(null=True, blank=True, max_length=255)
reviewer = models.ForeignKey('users.UserProfile', null=True, blank=True)
objects = AdditionalReviewManager()
class Meta:
db_table = 'additional_review'
get_latest_by = 'created'
@property
def pending(self):
return self.passed is None
@property
def failed(self):
return self.passed is False
def __init__(self, *args, **kwargs):
super(AdditionalReview, self).__init__(*args, **kwargs)
from mkt.reviewers.utils import log_reviewer_action
self.log_reviewer_action = log_reviewer_action
def execute_post_review_task(self):
"""
Call the correct post-review function for the queue.
"""
# TODO: Pull this function from somewhere based on self.queue.
if self.passed is None:
raise ValueError('cannot execute post-review task when unreviewed')
elif self.passed:
tarako_passed(self)
action = amo.LOG.PASS_ADDITIONAL_REVIEW
else:
tarako_failed(self)
action = amo.LOG.FAIL_ADDITIONAL_REVIEW
self.log_reviewer_action(
self.app, self.reviewer, self.comment or '', action,
queue=self.queue)
def cleanup_queues(sender, instance, **kwargs):
RereviewQueue.objects.filter(addon=instance).delete()
EscalationQueue.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(cleanup_queues, sender=Webapp,
dispatch_uid='queue-addon-cleanup') | @classmethod
def award_points(cls, user, addon, status, **kwargs):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
"""
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
if score:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s'
% (score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score | 100 | 118 | import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Sum
import commonware.log
import waffle
import amo
import mkt.constants.comm as comm
from amo.utils import cache_ns_key
from mkt.comm.utils import create_comm_note
from mkt.site.mail import send_mail_jinja
from mkt.site.models import ManagerBase, ModelBase, skip_cache
from mkt.tags.models import Tag
from mkt.translations.fields import save_signal, TranslatedField
from mkt.users.models import UserProfile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
user_log = commonware.log.getLogger('z.users')
QUEUE_TARAKO = 'tarako'
class CannedResponse(ModelBase):
name = TranslatedField()
response = TranslatedField(short=False)
sort_group = models.CharField(max_length=255)
class Meta:
db_table = 'cannedresponses'
def __unicode__(self):
return unicode(self.name)
models.signals.pre_save.connect(save_signal, sender=CannedResponse,
dispatch_uid='cannedresponses_translations')
class EditorSubscription(ModelBase):
user = models.ForeignKey(UserProfile)
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'editor_subscriptions'
class ReviewerScore(ModelBase):
user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
addon = models.ForeignKey(Webapp, blank=True, null=True, related_name='+')
score = models.SmallIntegerField()
# For automated point rewards.
note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
default=0)
# For manual point rewards with a note.
note = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'reviewer_scores'
ordering = ('-created',)
@classmethod
def get_key(cls, key=None, invalidate=False):
namespace = 'riscore'
if not key: # Assuming we're invalidating the namespace.
cache_ns_key(namespace, invalidate)
return
else:
# Using cache_ns_key so each cache val is invalidated together.
ns_key = cache_ns_key(namespace, invalidate)
return '%s:%s' % (ns_key, key)
@classmethod
def get_event(cls, addon, status, **kwargs):
"""Return the review event type constant.
This is determined by the app type and the queue the addon is
currently in (which is determined from the status).
Note: We're not using addon.status because this is called after the
status has been updated by the reviewer action.
"""
if addon.is_packaged:
if status in amo.WEBAPPS_APPROVED_STATUSES:
return amo.REVIEWED_WEBAPP_UPDATE
else: # If it's not PUBLIC, assume it's a new submission.
return amo.REVIEWED_WEBAPP_PACKAGED
else: # It's a hosted app.
in_rereview = kwargs.pop('in_rereview', False)
if status in amo.WEBAPPS_APPROVED_STATUSES and in_rereview:
return amo.REVIEWED_WEBAPP_REREVIEW
else:
return amo.REVIEWED_WEBAPP_HOSTED
@classmethod
def award_points(cls, user, addon, status, **kwargs):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
"""
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
if score:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s'
% (score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score
@classmethod
def award_moderation_points(cls, user, addon, review_id):
"""Awards points to user based on moderated review."""
event = amo.REVIEWED_APP_REVIEW
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = (ReviewerScore.objects.no_cache().filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
val = list(val[:limit])
cache.set(key, val, None)
return val
@classmethod
def get_performance(cls, user):
"""Returns sum of reviewer points."""
key = cls.get_key('get_performance:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val
@classmethod
def get_performance_since(cls, user, since):
"""
        Returns the reviewer's score rows, with their summed total, since the given datetime.
"""
key = cls.get_key('get_performance:%s:%s' % (user.id, since.isoformat()))
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s AND
`reviewer_scores`.`created` >= %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val
@classmethod
def _leaderboard_query(cls, since=None, types=None):
"""
Returns common SQL to leaderboard calls.
"""
query = (cls.objects
.values_list('user__id', 'user__display_name')
.annotate(total=Sum('score'))
.exclude(user__groups__name__in=('No Reviewer Incentives',
'Staff', 'Admins'))
.order_by('-total'))
if since is not None:
query = query.filter(created__gte=since)
if types is not None:
query = query.filter(note_key__in=types)
return query
@classmethod
def get_leaderboards(cls, user, days=7, types=None):
"""Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
             'leader_near': [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3.
"""
key = cls.get_key('get_leaderboards:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
week_ago = datetime.date.today() - datetime.timedelta(days=days)
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types)
scores = []
user_rank = 0
in_leaderboard = False
for rank, row in enumerate(query, 1):
user_id, name, total = row
scores.append({
'user_id': user_id,
'name': name,
'rank': rank,
'total': int(total),
})
if user_id == user.id:
user_rank = rank
in_leaderboard = True
if not in_leaderboard:
leader_top = scores[:5]
else:
if user_rank <= 5: # User is in top 5, show top 5.
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass # User is last on the leaderboard.
val = {
'leader_top': leader_top,
'leader_near': leader_near,
'user_rank': user_rank,
}
cache.set(key, val, None)
return val
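    # Illustrative only (names and totals are made up): the shape of the dict
    # returned by get_leaderboards() for a hypothetical reviewer at rank 7:
    #
    #   {'leader_top': [{'user_id': 1, 'name': 'A', 'rank': 1, 'total': 120},
    #                   {'user_id': 2, 'name': 'B', 'rank': 2, 'total': 95},
    #                   {'user_id': 3, 'name': 'C', 'rank': 3, 'total': 90}],
    #    'leader_near': [...],  # the reviewers at ranks 6, 7 and 8
    #    'user_rank': 7}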
@classmethod
def all_users_by_score(cls):
"""
Returns reviewers ordered by highest total points first.
"""
query = cls._leaderboard_query()
scores = []
for row in query:
user_id, name, total = row
user_level = len(amo.REVIEWED_LEVELS) - 1
for i, level in enumerate(amo.REVIEWED_LEVELS):
if total < level['points']:
user_level = i - 1
break
# Only show level if it changes.
if user_level < 0:
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({
'user_id': user_id,
'name': name,
'total': int(total),
'level': level,
})
prev = None
for score in reversed(scores):
if score['level'] == prev:
score['level'] = ''
else:
prev = score['level']
return scores
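# A minimal standalone sketch (not part of the original module) of the level
# lookup performed in all_users_by_score() above; assumes `levels` is ordered
# by ascending 'points', as amo.REVIEWED_LEVELS is.
def _level_name_for_total(total, levels):
    """Return the name of the highest level whose threshold is <= total."""
    user_level = len(levels) - 1
    for i, level in enumerate(levels):
        if total < level['points']:
            user_level = i - 1
            break
    return levels[user_level]['name'] if user_level >= 0 else ''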
class EscalationQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'escalation_queue'
class RereviewQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'rereview_queue'
@classmethod
def flag(cls, addon, event, message=None):
cls.objects.get_or_create(addon=addon)
if message:
amo.log(event, addon, addon.current_version,
details={'comments': message})
else:
amo.log(event, addon, addon.current_version)
# TODO: if we ever get rid of ActivityLog for reviewer notes, replace
# all flag calls to use the comm constant and not have to use
# ACTION_MAP.
create_comm_note(addon, addon.current_version, None, message,
note_type=comm.ACTION_MAP(event))
def send_tarako_mail(review):
if not waffle.switch_is_active('comm-dashboard'):
send_mail_jinja(
'Low-memory devices review {passed}'.format(
passed='passed' if review.passed else 'failed'),
'reviewers/emails/tarako_review_complete.txt',
{'review': review},
recipient_list=[a.email for a in review.app.authors.all()],
from_email=settings.MKT_REVIEWERS_EMAIL)
def tarako_passed(review):
"""Add the tarako tag to the app."""
tag = Tag(tag_text='tarako')
tag.save_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
def tarako_failed(review):
"""Remove the tarako tag from the app."""
tag = Tag(tag_text='tarako')
tag.remove_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
class AdditionalReviewManager(ManagerBase):
def unreviewed(self, queue, and_approved=False):
query = {
'passed': None,
'queue': queue,
}
if and_approved:
query['app__status__in'] = amo.WEBAPPS_APPROVED_STATUSES
return self.get_queryset().no_cache().filter(**query)
def latest_for_queue(self, queue):
try:
return self.get_queryset().filter(queue=queue).latest()
except AdditionalReview.DoesNotExist:
return None
class AdditionalReview(ModelBase):
app = models.ForeignKey(Webapp)
queue = models.CharField(max_length=30)
passed = models.NullBooleanField()
review_completed = models.DateTimeField(null=True)
comment = models.CharField(null=True, blank=True, max_length=255)
reviewer = models.ForeignKey('users.UserProfile', null=True, blank=True)
objects = AdditionalReviewManager()
class Meta:
db_table = 'additional_review'
get_latest_by = 'created'
@property
def pending(self):
return self.passed is None
@property
def failed(self):
return self.passed is False
def __init__(self, *args, **kwargs):
super(AdditionalReview, self).__init__(*args, **kwargs)
from mkt.reviewers.utils import log_reviewer_action
self.log_reviewer_action = log_reviewer_action
def execute_post_review_task(self):
"""
Call the correct post-review function for the queue.
"""
# TODO: Pull this function from somewhere based on self.queue.
if self.passed is None:
raise ValueError('cannot execute post-review task when unreviewed')
elif self.passed:
tarako_passed(self)
action = amo.LOG.PASS_ADDITIONAL_REVIEW
else:
tarako_failed(self)
action = amo.LOG.FAIL_ADDITIONAL_REVIEW
self.log_reviewer_action(
self.app, self.reviewer, self.comment or '', action,
queue=self.queue)
def cleanup_queues(sender, instance, **kwargs):
RereviewQueue.objects.filter(addon=instance).delete()
EscalationQueue.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(cleanup_queues, sender=Webapp,
dispatch_uid='queue-addon-cleanup')
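# Hedged usage sketch (not part of the original module): how a reviewer
# action flows through the scoring API above. `user`, `app` and `status`
# are hypothetical objects/constants.
#
#   score = ReviewerScore.award_points(user, app, status, in_rereview=False)
#   total = ReviewerScore.get_total(user)  # cached until the next award
#
# award_points() invalidates the 'riscore' cache namespace, so later
# get_total()/get_recent() calls recompute and re-cache their values.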
|
bgr2ycbcr | bgr version of matlab rgb2ycbcr
Python opencv library (cv2) cv2.COLOR_BGR2YCrCb uses
different parameters from MATLAB's color conversion.
only_y: only return Y channel
separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1] | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',
                  '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp', '.npy',
                  '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, "rb"))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
        with open(keys_cache_file, 'wb') as f:
            pickle.dump(keys, f)
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
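# Hedged usage sketch for get_image_paths(); the paths are hypothetical.
#
#   env, paths = get_image_paths('img', '/data/train_hr')     # env is None
#   env, paths = get_image_paths('lmdb', '/data/train.lmdb')  # env is an lmdb env
#
# For lmdb data the returned `env` must be passed back into read_img().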
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
    (can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
        if path[-3:].lower() == 'dng': # if image is a DNG
            import rawpy
            with rawpy.imread(path) as raw:
                img = raw.postprocess()
        # 'elif' (not 'if') so a decoded DNG is not overwritten by the cv2 fallback
        elif path[-3:].lower() == 'npy': # if image is a NPY numpy array
            with open(path, 'rb') as f:
                img = np.load(f)
        else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
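# Hedged usage sketch for read_img(); the path and lmdb key are hypothetical.
#
#   img = read_img(None, '/data/train_hr/0001.png')  # HWC, BGR, uint8, [0,255]
#   img = read_img(env, '0001')                      # same, but from an lmdb env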
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
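# A minimal, self-contained sanity check of fix_img_channels() (illustrative
# sketch; exercises shape behaviour only):
def _demo_fix_img_channels():
    gray = np.zeros((4, 4), dtype=np.uint8)
    rgba = np.zeros((4, 4, 4), dtype=np.uint8)
    assert fix_img_channels(gray, 3).shape == (4, 4, 3)  # gray replicated to 3 channels
    assert fix_img_channels(rgba, 3).shape == (4, 4, 3)  # alpha removed via bgra2rgb
    assert fix_img_channels(gray, 4).shape == (4, 4, 4)  # solid alpha channel added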
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
    cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue when removing the alpha
    channel: it can leave wrong color values in transparent regions. This instead
    masks each color channel with the alpha channel, removing stray transparent
    colors that can harm training.
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'RGB-LAB': # BGR to LAB
        return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
    elif in_c == 3 and tar_type == 'LAB-RGB': # LAB to BGR
        return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
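# Worked sanity check of the Y coefficients above (not from the original
# source): a white uint8 pixel maps to the studio-swing maximum, since
# (65.481 + 128.553 + 24.966) * 255 / 255 + 16 = 219 + 16 = 235.
#
#   rgb2ycbcr(np.uint8([[[255, 255, 255]]]))  # -> array([[235]], dtype=uint8)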
# MASKED: bgr2ycbcr function (lines 217-251)
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
    Python opencv library (cv2) cv2.COLOR_YCrCb2BGR uses
    different parameters from MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
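# A minimal round-trip sketch (illustrative; assumes bgr2ycbcr, the function
# masked above, is available): a valid uint8 BGR image should survive the
# conversion to YCbCr and back within a few quantization steps.
def _demo_ycbcr_roundtrip():
    img = np.random.randint(16, 236, (8, 8, 3), dtype=np.uint8)
    ycc = bgr2ycbcr(img, only_y=False)
    back = ycbcr2rgb(ycc)
    assert np.abs(back.astype(np.int16) - img.astype(np.int16)).max() <= 3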
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
    # random horizontal flip, vertical flip and 90-degree rotation
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
    Denormalize from the [min, max] range (default [-1, 1]) to [0, 1]
    formula: x' = (x - min) / (max - min)
    Example: "out = (x + 1.0) / 2.0" denormalizes
    range (-1, 1) to (0, 1)
    for use with a matching activation in the Generator output (i.e. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
def norm(x):
#Normalize (z-norm) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.)
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = norm(img)
return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
#TODO: Check: could denormalize in the begining in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
        # Important. Unlike MATLAB, np.uint8() WILL NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
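# Hedged round-trip sketch for the two converters above (assumes the
# BGR<->RGB helpers imported from .colors behave as their names suggest):
def _demo_np_tensor_roundtrip():
    img = np.random.randint(0, 256, (16, 16, 3), dtype=np.uint8)  # HWC, BGR
    t = np2tensor(img)    # 1x3xHxW float tensor, RGB, [0,1]
    back = tensor2np(t)   # HxWx3 uint8, BGR, [0,255]
    assert np.array_equal(back, img)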
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
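# Hedged usage sketch for tiled processing: split a tensor into overlapping
# patches, process them (identity here), and blend them back together. With
# identity processing the recomposition reproduces the input.
def _demo_patchify_roundtrip():
    x = torch.rand(1, 3, 64, 64)
    patches = patchify_tensor(x, patch_size=32, overlap=10)
    y = recompose_tensor(patches, 64, 64, overlap=10)
    assert torch.allclose(y, x, atol=1e-5)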
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
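# (This is the Keys cubic convolution kernel with a = -0.5, the same kernel
# MATLAB's imresize uses for 'bicubic'.)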
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
        # Use a modified (wider) kernel to simultaneously interpolate and antialias
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
    # This is not currently supported here.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
    # This is not currently supported here.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
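# Hedged usage sketch for imresize_np(): downscale an HWC BGR image by 4x
# with the default MATLAB-style antialiased cubic kernel. `hr` is a
# hypothetical HxWx3 image.
#
#   lr = imresize_np(hr, 1 / 4)                           # uint8 in, uint8 out
#   lr = imresize_np(hr.astype(np.float32) / 255, 1 / 4)  # float [0,1] in/out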
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False) | def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
    Python opencv library (cv2) cv2.COLOR_BGR2YCrCb uses
    different parameters from MATLAB's color conversion.
    only_y: only return Y channel
    separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type) | 217 | 251 | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',
                  '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp', '.npy',
                  '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, "rb"))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
        with open(keys_cache_file, 'wb') as f:
            pickle.dump(keys, f)
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
    (can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
        if path[-3:].lower() == 'dng': # if image is a DNG
            import rawpy
            with rawpy.imread(path) as raw:
                img = raw.postprocess()
        # 'elif' (not 'if') so a decoded DNG is not overwritten by the cv2 fallback
        elif path[-3:].lower() == 'npy': # if image is a NPY numpy array
            with open(path, 'rb') as f:
                img = np.load(f)
        else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
    cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue when removing the alpha
    channel: it can leave wrong color values in transparent regions. This instead
    masks each color channel with the alpha channel, removing stray transparent
    colors that can harm training.
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'RGB-LAB': # BGR to LAB
        return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
    elif in_c == 3 and tar_type == 'LAB-RGB': # LAB to BGR
        return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
    Python opencv library (cv2) cv2.COLOR_BGR2YCrCb uses
    different parameters from MATLAB's color conversion.
    only_y: only return Y channel
    separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type)
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
    Python opencv library (cv2) cv2.COLOR_YCrCb2BGR uses
    different parameters from MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
    # random horizontal flip, vertical flip and 90-degree rotation
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
    Denormalize from the [min, max] range (default [-1, 1]) to [0, 1]
    formula: x' = (x - min) / (max - min)
    Example: "out = (x + 1.0) / 2.0" denormalizes
    range (-1, 1) to (0, 1)
    for use with a matching activation in the Generator output (i.e. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
def norm(x):
#Normalize (z-norm) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.)
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = norm(img)
return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
#TODO: Check: could denormalize in the begining in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
        # Important. Unlike MATLAB, np.uint8() WILL NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
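# Hedged usage sketch (not part of the original API): split an image into
# overlapping patches, then blend them back. The recomposed tensor should
# match the input up to floating point error, since the accumulated blending
# weights are normalized by `blending_image`.
def _example_patchify_roundtrip():
    x = torch.rand(1, 3, 100, 100)  # B, C, H, W
    patches = patchify_tensor(x, patch_size=40, overlap=10)  # 16 x 3 x 40 x 40
    y = recompose_tensor(patches, 100, 100, overlap=10)
    print((y - x).abs().max())  # ~0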
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
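# The cubic kernel below is the Keys bicubic kernel with a = -0.5 (MATLAB's
# default for imresize):
#   W(x) = 1.5|x|^3 - 2.5|x|^2 + 1          for |x| <= 1
#   W(x) = -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2  for 1 < |x| <= 2
#   W(x) = 0                                otherwise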
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
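# Hedged sketch: for an antialiased 2x downscale (scale=0.5) of a length-16
# signal, the cubic kernel support widens to 4.0/0.5 = 8 taps (plus 2 for
# padding); `weights` holds one row of normalized taps per output pixel and
# `indices` the matching input positions.
def _example_weights_indices():
    weights, indices, s, e = calculate_weights_indices(
        16, 8, 0.5, cubic, 4.0, antialiasing=True)
    print(weights.shape, indices.shape, s, e)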
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize chooses the dimension order for the resize: the
# strategy is to resize first along the dimension with the smallest
# scale factor. That ordering optimization is not supported here.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize chooses the dimension order for the resize: the
# strategy is to resize first along the dimension with the smallest
# scale factor. That ordering optimization is not supported here.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
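# Hedged usage sketch: MATLAB-like 4x bicubic downscale of a uint8 HWC BGR
# image (e.g. as read by cv2.imread); the input range and dtype are restored
# on output.
def _example_imresize_np():
    hr = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    lr = imresize_np(hr, 1 / 4, antialiasing=True)
    print(lr.shape, lr.dtype)  # (16, 16, 3) uint8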
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
|
ycbcr2rgb | bgr version of matlab ycbcr2rgb
Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has
different parameters from MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1] | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp','.npy', '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, "rb"))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
pickle.dump(keys, open(keys_cache_file, 'wb'))
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
(can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
if(path[-3:].lower() == 'dng'): # if image is a DNG
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
elif(path[-3:].lower() == 'npy'): # if image is a NPY numpy array
with open(path, 'rb') as f:
img = np.load(f)
else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
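# Hedged usage sketch; 'example.png' is a placeholder path, not a file that
# ships with the repo. With env=None the image is read directly from disk.
def _example_read_img(path='example.png'):
    img = read_img(None, path, out_nc=3)  # HWC, BGR, uint8 [0, 255]
    return img.shape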
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue when removing the alpha
channel; this version masks out the color of fully transparent pixels,
removing spurious colors that could harm training.
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'RGB-LAB': # BGR to LAB
return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
elif in_c == 3 and tar_type == 'LAB-RGB': # LAB to BGR
return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
Python OpenCV (cv2) cv2.COLOR_BGR2YCrCb uses
different parameters from MATLAB's color conversion.
only_y: only return Y channel
separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type)
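# Hedged sketch: MATLAB-style luma from a BGR uint8 image; a pure white
# pixel maps to Y = 235, the top of the nominal [16, 235] range.
def _example_bgr2ycbcr():
    white = np.full((2, 2, 3), 255, dtype=np.uint8)
    y = bgr2ycbcr(white, only_y=True)
    print(y[0, 0])  # 235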
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
# MASKED: ycbcr2rgb function (lines 289-320)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
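# Hedged sketch: modcrop trims H and W down to the nearest multiple of
# `scale`, e.g. 13x10 -> 12x8 for scale=4.
def _example_modcrop():
    img = np.zeros((13, 10, 3), dtype=np.uint8)
    print(modcrop(img, 4).shape)  # (12, 8, 3)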
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
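# Hedged sketch: one random flip/rotation combination is drawn per call and
# applied identically to every image in the list (e.g. an LR/HR pair), so
# paired images stay aligned.
def _example_augment():
    lr = np.zeros((4, 4, 3), dtype=np.uint8)
    hr = np.zeros((8, 8, 3), dtype=np.uint8)
    lr_a, hr_a = augment([lr, hr], hflip=True, rot=True)
    print(lr_a.shape, hr_a.shape)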
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
Denormalize from the [min, max] range (default [-1, 1]) to [0, 1]:
out = (x - min) / (max - min)
Example: with the default min_max this reduces to "out = (x + 1.0) / 2.0"
for use with a matching activation in the Generator output (i.e. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or np.ndarray")
def norm(x):
#Normalize (z-norm) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or np.ndarray")
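# Hedged sketch: norm and denorm are inverses on [0, 1] data.
def _example_norm_roundtrip():
    x = np.linspace(0, 1, 5, dtype=np.float32)
    assert np.allclose(denorm(norm(x)), x)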
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGRA to RGBA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = norm(img)
return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only 4D, 3D and 2D tensors are supported, but received a tensor of dimension: {:d}'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
#TODO: Check: could denormalize in the beginning in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
# Important. Unlike MATLAB, numpy.uint8() WILL NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
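# Hedged round-trip sketch: uint8 HWC BGR -> 1x3xHxW float RGB tensor in
# [0, 1] -> back to uint8 HWC BGR. The round trip is lossless because
# tensor2np rounds after rescaling to [0, 255].
def _example_np_tensor_roundtrip():
    img = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
    t = np2tensor(img)    # torch.Size([1, 3, 8, 8])
    back = tensor2np(t)   # (8, 8, 3), uint8
    assert np.array_equal(img, back)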
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize chooses the dimension order for the resize: the
# strategy is to resize first along the dimension with the smallest
# scale factor. That ordering optimization is not supported here.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize chooses the dimension order for the resize: the
# strategy is to resize first along the dimension with the smallest
# scale factor. That ordering optimization is not supported here.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False) | def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has
different parameters from MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type) | 289 | 320 | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp','.npy', '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, "rb"))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
pickle.dump(keys, open(keys_cache_file, 'wb'))
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
(can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
if(path[-3:].lower() == 'dng'): # if image is a DNG
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
elif(path[-3:].lower() == 'npy'): # if image is a NPY numpy array
with open(path, 'rb') as f:
img = np.load(f)
else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue when removing the alpha
channel; this version masks out the color of fully transparent pixels,
removing spurious colors that could harm training.
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'RGB-LAB': # BGR to LAB
return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
elif in_c == 3 and tar_type == 'LAB-RGB': # LAB to BGR
return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
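# Hedged sketch: converting a list of BGR images to single-channel luma
# ('y'); each result keeps an explicit channel axis.
def _example_channel_convert():
    imgs = [np.zeros((4, 4, 3), dtype=np.uint8)]
    (y,) = channel_convert(3, 'y', imgs)
    print(y.shape)  # (4, 4, 1)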
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
Python OpenCV (cv2) cv2.COLOR_BGR2YCrCb uses
different parameters from MATLAB's color conversion.
only_y: only return Y channel
separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type)
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has
different parameters from MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
Denormalize from the [min, max] range (default [-1, 1]) to [0, 1]:
out = (x - min) / (max - min)
Example: with the default min_max this reduces to "out = (x + 1.0) / 2.0"
for use with a matching activation in the Generator output (i.e. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or np.ndarray")
def norm(x):
#Normalize (z-norm) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGRA to RGBA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = norm(img)
return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only 4D, 3D and 2D tensors are supported, but received a tensor of dimension: {:d}'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
#TODO: Check: could denormalize in the beginning in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
# Important. Unlike MATLAB, numpy.uint8() WILL NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
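# Usage sketch for tiled inference ('net' is a hypothetical size-preserving model):
# x = torch.rand(1, 3, 100, 100)
# patches = patchify_tensor(x, patch_size=40, overlap=10) # -> (16, 3, 40, 40)
# out = net(patches) # hypothetical model call, same spatial size out
# y = recompose_tensor(out, 100, 100, overlap=10) # -> (1, 3, 100, 100)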
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias - larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
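# Sanity-check sketch (hypothetical values): for a 4x downscale with the cubic
# kernel, antialiasing widens the support to 4/0.25 = 16 taps (P = 18 before
# trimming all-zero border columns), and each row of weights sums to ~1:
# w, idx, s_s, s_e = calculate_weights_indices(100, 25, 0.25, cubic, 4.0, True)
# torch.allclose(w.sum(1), torch.ones(25)) # -> True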
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize would choose the dimension order for the resize
# (resizing first along the dimension with the smallest scale factor);
# that optimization is not supported here, so H is always processed before W.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize would choose the dimension order for the resize
# (resizing first along the dimension with the smallest scale factor);
# that optimization is not supported here, so H is always processed before W.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
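# Usage sketch (hypothetical path): MATLAB-like 0.5x downscale of a cv2 image.
# img = cv2.imread('input.png').astype(np.float32) / 255. # HWC, BGR, [0,1]
# small = imresize_np(img, 1/2) # -> HWC, BGR, [0,1], no rounding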
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
|
np2tensor | Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp','.npy', '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
with open(keys_cache_file, "rb") as f:
keys = pickle.load(f)
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
with open(keys_cache_file, 'wb') as f:
pickle.dump(keys, f)
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
(can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
if(path[-3:].lower() == 'dng'): # if image is a DNG
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
elif(path[-3:].lower() == 'npy'): # if image is a NPY numpy array (elif, so a DNG result is not overwritten below)
with open(path, 'rb') as f:
img = np.load(f)
else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
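# Usage sketch ('/data/train_hr' is a hypothetical folder): list and read images.
# env, paths = get_image_paths('img', '/data/train_hr') # env is None for 'img'
# img = read_img(env, paths[0]) # -> np.uint8, HWC, BGR, [0,255]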
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue when removing the alpha
channel; masking each color channel with the alpha channel instead gets rid
of the wrong colors hidden in transparent pixels, which can harm training
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'RGB-LAB': #RGB to LAB
return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
elif in_c == 3 and tar_type == 'LAB-RGB': #LAB to RGB
return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
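# Worked check: a white uint8 pixel maps to video-range luma,
# Y = (65.481 + 128.553 + 24.966) + 16 = 235, as in MATLAB's rgb2ycbcr:
# white = np.full((1, 1, 3), 255, dtype=np.uint8)
# rgb2ycbcr(white, only_y=True) # -> array([[235]], dtype=uint8)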
def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
The Python OpenCV (cv2) conversion cv2.COLOR_BGR2YCrCb uses
different parameters than MATLAB's color conversion.
only_y: only return Y channel
separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type)
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
The Python OpenCV (cv2) conversion cv2.COLOR_YCrCb2BGR uses
different parameters than MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
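# Usage sketch: crop H and W down to multiples of the scale factor.
# x = np.zeros((101, 99, 3))
# modcrop(x, 4).shape # -> (100, 96, 3)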
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
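# Usage sketch (lr/hr are hypothetical aligned arrays): one random draw is
# shared by the whole list, so paired images stay aligned after augmentation.
# lr, hr = augment([lr, hr], hflip=True, rot=True)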
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
Denormalize from the min_max range (default [-1,1]) back to [0,1]:
out = (x - min) / (max - min)
Example: for the default range this is "out = (x + 1.0) / 2.0",
mapping (-1,1) to (0,1); for use with a matching activation in the
Generator output (i.e. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
def norm(x):
#Normalize (linearly scale and shift) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
# MASKED: np2tensor function (lines 420-450)
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only 4D, 3D and 2D tensors are supported, but received a tensor with {:d} dimensions'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
#TODO: Check: could denormalize at the beginning in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
# Important. Unlike MATLAB, numpy.uint8() WILL NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias - larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize would choose the dimension order for the resize
# (resizing first along the dimension with the smallest scale factor);
# that optimization is not supported here, so H is always processed before W.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# MATLAB's imresize would choose the dimension order for the resize
# (resizing first along the dimension with the smallest scale factor);
# that optimization is not supported here, so H is always processed before W.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False) | def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGRA to RGBA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # add a fake batch dimension of size 1; squeeze() would remove all dimensions of size 1
if normalize:
img = norm(img)
return img | 420 | 450 | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp','.npy', '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
with open(keys_cache_file, "rb") as f:
keys = pickle.load(f)
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
with open(keys_cache_file, 'wb') as f:
pickle.dump(keys, f)
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
(can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
if(path[-3:].lower() == 'dng'): # if image is a DNG
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
elif(path[-3:].lower() == 'npy'): # if image is a NPY numpy array (elif, so a DNG result is not overwritten below)
with open(path, 'rb') as f:
img = np.load(f)
else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue when removing the alpha
channel; masking each color channel with the alpha channel instead gets rid
of the wrong colors hidden in transparent pixels, which can harm training
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'RGB-LAB': #RGB to LAB
return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
elif in_c == 3 and tar_type == 'LAB-RGB': #LAB to RGB
return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
The Python OpenCV (cv2) conversion cv2.COLOR_BGR2YCrCb uses
different parameters than MATLAB's color conversion.
only_y: only return Y channel
separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type)
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
The Python OpenCV (cv2) conversion cv2.COLOR_YCrCb2BGR uses
different parameters than MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
Denormalize from the min_max range (default [-1,1]) back to [0,1]:
out = (x - min) / (max - min)
Example: for the default range this is "out = (x + 1.0) / 2.0",
mapping (-1,1) to (0,1); for use with a matching activation in the
Generator output (i.e. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
def norm(x):
#Normalize (linearly scale and shift) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGRA to RGBA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # add a fake batch dimension of size 1; squeeze() would remove all dimensions of size 1
if normalize:
img = norm(img)
return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
        raise TypeError(
            'Only 4D, 3D and 2D tensors are supported. Received a tensor of dimension: {:d}'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
    #TODO: Check: could denormalize in the beginning in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
        # Important: unlike MATLAB, casting with numpy.uint8() will NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
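# Illustrative round trip (a sketch, not part of the original module): an
# OpenCV-style uint8 BGR HWC image to a [0,1] CHW RGB tensor and back. The
# max absolute difference stays within 1 (in uint8 units) due to the final
# rounding.
def _example_np_tensor_roundtrip():
    bgr = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
    t = np2tensor(bgr, bgr2rgb=True, add_batch=True)        # 1x3x32x32, RGB, [0,1]
    back = tensor2np(t, rgb2bgr=True, remove_batch=True)    # 32x32x3, BGR, [0,255]
    return np.abs(back.astype(np.int16) - bgr.astype(np.int16)).max()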
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
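# Illustrative sketch of tiled inference with the two helpers above (not part
# of the original file; `model` is a placeholder for any network that keeps
# the spatial size of its input, e.g. a denoiser).
def _example_tiled_inference(model, image, patch_size=64, overlap=10):
    _, _, full_h, full_w = image.shape             # image: 1xCxHxW tensor
    patches = patchify_tensor(image, patch_size, overlap=overlap)  # NxCxPxP
    processed = model(patches)                     # run the whole patch batch
    return recompose_tensor(processed, full_h, full_w, overlap=overlap)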
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation kernels. x is the distance between the output sample position and an input pixel center
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
        # Use a modified kernel (with a larger width) to simultaneously interpolate and antialias
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
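# Worked example (illustrative, not part of the original file): for a 2x
# downscale (scale=0.5) the inverse mapping u = x/scale + 0.5*(1 - 1/scale)
# sends output pixel 1 to u = 2 + 0.5*(1 - 2) = 1.5, i.e. exactly between
# input pixels 1 and 2, and with antialiasing the cubic support widens from
# 4 to 4/0.5 = 8 input pixels per output pixel.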
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
    # This implementation does not support that optimization.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
    # This implementation does not support that optimization.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
|
cookouttray | For those who do finances with cookout trays, we proudly present the command for you
Simply type one of the following:
cookouttray
ctray
trayforjay
Followed by a monetary value such as (leave off the dollar sign):
20
100
3.14
To have it converted into cookout trays
Examples:
cookouttray 20
ctray 100
trayforjay 3.14
Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout's website | import concurrent.futures
import datetime
import io
import logging
import os
import random
import time
import typing as t
import discord
import discord.ext.commands as commands
from PIL import Image, ImageDraw, ImageSequence, ImageFont
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
log = logging.getLogger(__name__)
MAX_WALDO_GRID_SIZE = 100
CRAB_LINE_LENGTH = 58
CRAB_COMMAND_COOLDOWN = 3
def pillow_process(args, is_rave, lines_in_text, timestamp):
# Open crab.gif and add our font
with Image.open('bot/cogs/memes_cog/assets/crab.gif') as im:
fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)
# Draw text on each frame of the gif
# Gonna be honest I don't quite understand how it works but I got it from the Pillow docs/issues
frames = []
for frame in ImageSequence.Iterator(im):
d = ImageDraw.Draw(frame)
w, h = d.textsize(args, fnt)
            # draws the text onto the frame, centered horizontally and placed as close to the bottom as possible
d.text((im.size[0] / 2 - w / 2, im.size[1] - h - (5 * lines_in_text)), args, font=fnt, align='center',
stroke_width=bool(is_rave), stroke_fill=Colors.ClemsonOrange, spacing=6)
del d
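            # (clarifying note, not in the original: saving the frame into an
            # in-memory buffer and reopening it forces Pillow to re-encode the
            # drawn frame as a palette GIF, so the text survives reassembly)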
b = io.BytesIO()
frame.save(b, format='GIF')
frame = Image.open(b)
frames.append(frame)
frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif', save_all=True, append_images=frames[1:])
class MemesCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command()
@ext.long_help(
'A fun command to generate a pseudo bubblewrap effect in discord'
)
@ext.short_help('Creates bubblewrap!')
@ext.example('bubblewrap')
async def bubblewrap(self, ctx):
msg = ''
for _ in range(0, 5):
for _ in range(0, 10):
msg += '||pop!|| '
msg += '\n'
await ctx.send(msg)
@commands.command()
@ext.long_help(
        "A fun command to generate a Where's Waldo effect in discord, see if you can find him first! "
        'Optionally takes a size parameter to make it easier or harder'
)
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
"""
Play Where's Waldo!
Usage: <prefix>waldo [size = 100]
"""
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X',
'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
for i in range(size + 1):
if i == place:
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, len(random_start_letters) - 1)
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if new_line < new_line_waldo_chance or count > max_waldo_line_size:
msg += '\n'
count = 0
await ctx.send(msg)
@ext.command()
@ext.chainable()
@ext.long_help(
'A fun command to spongebob meme text in discord'
)
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
"""
Spongebob Text
"""
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if helper > 60:
result += str(i).upper()
else:
result += str(i).lower()
await ctx.send(result)
@ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
'A fun command to generate a crab rave gif with specified text overlay'
)
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool] = True, *, args='Bottom text\n is dead'):
"""
Create your own crab rave.
Usage: <prefix>crab [is_rave=True] [text=Bottom text\\n is dead]
Aliases: rave, 🦀
"""
# crab.gif dimensions - 352 by 200
        # Immediately grab the timestamp in case of multiple calls in a row
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = await ctx.send('Generating your gif')
args = args.replace('\\', '')
# Add new lines for when the text would go out of bounds
lines_in_text = 1
while len(args) > (CRAB_LINE_LENGTH * lines_in_text):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
# I didn't want to add a newline in the middle of a word
while not args[newline_loc].isspace():
newline_loc -= 1
if newline_loc == CRAB_LINE_LENGTH * (lines_in_text - 1):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
break
args = f'{args[:newline_loc]} \n{args[newline_loc:]}'
lines_in_text += 1
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
await loop.run_in_executor(pool, pillow_process, *pil_args)
# Attach, send, and delete created gif
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = await ctx.send(file=attachment)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author)
await wait_msg.delete()
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
# MASKED: cookouttray function (lines 181-211)
def setup(bot):
bot.add_cog(MemesCog(bot)) | @ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
"""
For those who do finances with cookout trays, we proudly present the command for you
Simply type one of the following:
cookouttray
ctray
trayforjay
Followed by a monetary value such as (leave off the dollar sign):
20
100
3.14
To have it converted into cookout trays
Examples:
cookouttray 20
ctray 100
trayforjay 3.14
Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout's website
"""
money = round(float(input), 2)
output = money / 5
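        # e.g. "ctray 20" -> 20 / 5 = 4.0 trays (a $5-per-tray rate, per the division above)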
embed = discord.Embed(
title='Cash to Cookout Tray Converter',
description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays',
url=f"https://www.fastfoodmenuprices.com/cookout-prices/",
color=Colors.ClemsonOrange)
await ctx.send(embed=embed) | 181 | 211 | import concurrent.futures
import datetime
import io
import logging
import os
import random
import time
import typing as t
import discord
import discord.ext.commands as commands
from PIL import Image, ImageDraw, ImageSequence, ImageFont
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
log = logging.getLogger(__name__)
MAX_WALDO_GRID_SIZE = 100
CRAB_LINE_LENGTH = 58
CRAB_COMMAND_COOLDOWN = 3
def pillow_process(args, is_rave, lines_in_text, timestamp):
# Open crab.gif and add our font
with Image.open('bot/cogs/memes_cog/assets/crab.gif') as im:
fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)
# Draw text on each frame of the gif
# Gonna be honest I don't quite understand how it works but I got it from the Pillow docs/issues
frames = []
for frame in ImageSequence.Iterator(im):
d = ImageDraw.Draw(frame)
w, h = d.textsize(args, fnt)
            # draws the text onto the frame, centered horizontally and placed as close to the bottom as possible
d.text((im.size[0] / 2 - w / 2, im.size[1] - h - (5 * lines_in_text)), args, font=fnt, align='center',
stroke_width=bool(is_rave), stroke_fill=Colors.ClemsonOrange, spacing=6)
del d
b = io.BytesIO()
frame.save(b, format='GIF')
frame = Image.open(b)
frames.append(frame)
frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif', save_all=True, append_images=frames[1:])
class MemesCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command()
@ext.long_help(
'A fun command to generate a pseudo bubblewrap effect in discord'
)
@ext.short_help('Creates bubblewrap!')
@ext.example('bubblewrap')
async def bubblewrap(self, ctx):
msg = ''
for _ in range(0, 5):
for _ in range(0, 10):
msg += '||pop!|| '
msg += '\n'
await ctx.send(msg)
@commands.command()
@ext.long_help(
        "A fun command to generate a Where's Waldo effect in discord, see if you can find him first! "
        'Optionally takes a size parameter to make it easier or harder'
)
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
"""
Play Where's Waldo!
Usage: <prefix>waldo [size = 100]
"""
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X',
'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
for i in range(size + 1):
if i == place:
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, len(random_start_letters) - 1)
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if new_line < new_line_waldo_chance or count > max_waldo_line_size:
msg += '\n'
count = 0
await ctx.send(msg)
@ext.command()
@ext.chainable()
@ext.long_help(
'A fun command to spongebob meme text in discord'
)
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
"""
Spongebob Text
"""
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if helper > 60:
result += str(i).upper()
else:
result += str(i).lower()
await ctx.send(result)
@ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
'A fun command to generate a crab rave gif with specified text overlay'
)
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool] = True, *, args='Bottom text\n is dead'):
"""
Create your own crab rave.
Usage: <prefix>crab [is_rave=True] [text=Bottom text\\n is dead]
Aliases: rave, 🦀
"""
# crab.gif dimensions - 352 by 200
        # Immediately grab the timestamp in case of multiple calls in a row
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = await ctx.send('Generating your gif')
args = args.replace('\\', '')
# Add new lines for when the text would go out of bounds
lines_in_text = 1
while len(args) > (CRAB_LINE_LENGTH * lines_in_text):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
# I didn't want to add a newline in the middle of a word
while not args[newline_loc].isspace():
newline_loc -= 1
if newline_loc == CRAB_LINE_LENGTH * (lines_in_text - 1):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
break
args = f'{args[:newline_loc]} \n{args[newline_loc:]}'
lines_in_text += 1
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
await loop.run_in_executor(pool, pillow_process, *pil_args)
# Attach, send, and delete created gif
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = await ctx.send(file=attachment)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author)
await wait_msg.delete()
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
@ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
"""
For those who do finances with cookout trays, we proudly present the command for you
Simply type one of the following:
cookouttray
ctray
trayforjay
Followed by a monetary value such as (leave off the dollar sign):
20
100
3.14
To have it converted into cookout trays
Examples:
cookouttray 20
ctray 100
trayforjay 3.14
Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout's website
"""
money = round(float(input), 2)
output = money / 5
embed = discord.Embed(
title='Cash to Cookout Tray Converter',
description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays',
url=f"https://www.fastfoodmenuprices.com/cookout-prices/",
color=Colors.ClemsonOrange)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(MemesCog(bot))
|
_invalid_headers | Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False | from __future__ import unicode_literals
import io
import os
import re
import sys
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
amzn_request_id,
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
)
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urlencode,
urlunparse,
)
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.settings import S3_IGNORE_SUBDOMAIN_BUCKETNAME
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
BucketMustHaveLockeEnabled,
DuplicateTagKeys,
InvalidContentMD5,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidMaxPartArgument,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
LockNotEnabled,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
ACTION_MAP = {
"BUCKET": {
"HEAD": {"DEFAULT": "HeadBucket",},
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"website": "DeleteBucketWebsite",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"HEAD": {"DEFAULT": "HeadObject",},
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
},
"DELETE": {
"uploadId": "AbortMultipartUpload",
"versionId": "DeleteObjectVersion",
"DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
def parse_key_name(pth):
# strip the first '/' left by urlparse
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
    # Go sends a request as url/?delete= (treating it as a normal key=value pair, even if the value is empty)
# Python sends a request as url/?delete (treating it as a flag)
# https://github.com/spulec/moto/issues/2937
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.list_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
if S3_IGNORE_SUBDOMAIN_BUCKETNAME:
return False
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or
# (4) kubernetes host names
return False
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
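    # Illustrative outcomes for the method above (not part of the original
    # source), assuming default settings:
    #   host "mybucket.s3.amazonaws.com" -> True  (virtual-hosted style)
    #   host "s3.amazonaws.com"          -> False (path-based style)
    #   host "localhost:5000" or an IP   -> False (local endpoints)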
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
@amzn_request_id
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
if isinstance(response, str):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, bytes):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, bytes):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name, querystring)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
def _bucket_response_head(self, bucket_name, querystring):
self._set_action("BUCKET", "HEAD", querystring)
self._authenticate_and_authorize_s3_action()
try:
self.backend.head_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
(
lock_enabled,
mode,
days,
years,
) = self.backend.get_object_lock_configuration(bucket_name)
template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)
return template.render(
lock_enabled=lock_enabled, mode=mode, days=days, years=years,
)
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
location = self.backend.get_bucket_location(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
rules = self.backend.get_bucket_lifecycle(bucket_name)
if not rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
acl = self.backend.get_bucket_acl(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(acl=acl)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_public_access_block(bucket_name)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
(
versions,
common_prefixes,
delete_markers,
) = self.backend.list_object_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
key_list = versions
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return (
200,
{},
template.render(
common_prefixes=common_prefixes,
key_list=key_list,
delete_marker_list=delete_markers,
bucket=bucket,
prefix=prefix,
max_keys=1000,
delimiter=delimiter,
key_marker=key_marker,
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.list_objects(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
def _set_action(self, action_resource_type, method, querystring):
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token is not None and continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
all_keys = self.backend.list_objects_v2(bucket, prefix, delimiter)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
@staticmethod
def _split_truncated_keys(truncated_keys):
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
def _truncate_result(self, result_keys, max_keys):
if max_keys == 0:
result_keys = []
is_truncated = True
next_continuation_token = None
elif len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
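    # Illustrative walk-through (not in the original source): with keys
    # ["a", "b", "c"] and max_keys=2, _truncate_result returns
    # (["a", "b"], "true", "b"); feeding "b" back in as the continuation
    # token via _get_results_from_token then yields ["c"] for the next page.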
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
body_decoded = body.decode()
config = self._lock_config_from_xml(body_decoded)
if not self.backend.get_bucket(bucket_name).object_lock_enabled:
raise BucketMustHaveLockeEnabled
self.backend.put_object_lock_configuration(
bucket_name,
config.get("enabled"),
config.get("mode"),
config.get("days"),
config.get("years"),
)
return 200, {}, ""
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
                # If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.put_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.put_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.put_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
new_bucket = self.backend.get_bucket(bucket_name)
if (
new_bucket.region_name == DEFAULT_REGION_NAME
and region_name == DEFAULT_REGION_NAME
):
# us-east-1 has different behavior - creating a bucket there is an idempotent operation
pass
else:
template = self.response_template(S3_DUPLICATE_BUCKET_ERROR)
return 409, {}, template.render(bucket_name=bucket_name)
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.put_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
if (
request.headers.get("x-amz-bucket-object-lock-enabled", "").lower()
== "true"
):
new_bucket.object_lock_enabled = True
new_bucket.versioning_status = "Enabled"
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "website" in querystring:
self.backend.delete_bucket_website(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
self.backend.delete_bucket_lifecycle(bucket_name)
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
fobj = request.files["file"]
f = fobj.stream.read()
key = key.replace("${filename}", os.path.basename(fobj.filename))
if "success_action_redirect" in form:
redirect = form["success_action_redirect"]
parts = urlparse(redirect)
queryargs = parse_qs(parts.query)
queryargs["key"] = key
queryargs["bucket"] = bucket_name
redirect_queryargs = urlencode(queryargs, doseq=True)
newparts = (
parts.scheme,
parts.netloc,
parts.path,
parts.params,
redirect_queryargs,
parts.fragment,
)
fixed_redirect = urlunparse(newparts)
response_headers["Location"] = fixed_redirect
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.put_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body, strip_whitespace=False)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = self.backend.delete_objects(bucket_name, objects)
error_names = []
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
def _handle_v4_chunk_signatures(self, body, content_length):
body_io = io.BytesIO(body)
new_body = bytearray(content_length)
pos = 0
line = body_io.readline()
while line:
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
# str(hex(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
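            # e.g. b"400;chunk-signature=<64 hex chars>\r\n" announces 0x400
            # (1024) bytes of chunk data, followed by a trailing \r\n.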
chunk_size = int(line[: line.find(b";")].decode("utf8"), 16)
new_body[pos : pos + chunk_size] = body_io.read(chunk_size)
pos = pos + chunk_size
body_io.read(2) # skip trailing \r\n
line = body_io.readline()
return bytes(new_body)
@amzn_request_id
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, self.headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, str):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
# Because we patch the requests library the boto/boto3 API
# requests go through this method but so do
# `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
# Here we deny public access to private files by checking the
# ACL and checking for the mere presence of an Authorization
# header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if not body:
# when the data is being passed as a file
if request.files:
for _, value in request.files.items():
body = value.stream.read()
elif hasattr(request, "form"):
# Body comes through as part of the form, if no content-type is set on the PUT-request
# form = ImmutableMultiDict([('some data 123 321', '')])
form = request.form
for k, _ in form.items():
body = k
if body is None:
body = b""
if (
request.headers.get("x-amz-content-sha256", None)
== "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
):
body = self._handle_v4_chunk_signatures(
body, int(request.headers["x-amz-decoded-content-length"])
)
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(headers, bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
# 0 <= PartNumberMarker <= 2,147,483,647
part_number_marker = int(query.get("part-number-marker", [0])[0])
if not (0 <= part_number_marker <= 2147483647):
raise InvalidMaxPartArgument("part-number-marker", 0, 2147483647)
# 0 <= MaxParts <= 2,147,483,647 (default is 1,000)
max_parts = int(query.get("max-parts", [1000])[0])
if not (0 <= max_parts <= 2147483647):
raise InvalidMaxPartArgument("max-parts", 0, 2147483647)
parts = self.backend.list_parts(
bucket_name,
upload_id,
part_number_marker=part_number_marker,
max_parts=max_parts,
)
next_part_number_marker = parts[-1].name + 1 if parts else 0
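            # Marker for the next ListParts page; 0 when no parts were returned.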
is_truncated = parts and self.backend.is_truncated(
bucket_name, upload_id, next_part_number_marker
)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
is_truncated=str(is_truncated).lower(),
max_parts=max_parts,
next_part_number_marker=next_part_number_marker,
parts=parts,
part_number_marker=part_number_marker,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
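        # Conditional GET: failed If-Unmodified-Since / If-Match checks raise
        # PreconditionFailed (412); If-Modified-Since / If-None-Match yield 304.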
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion()
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag not in [if_match, '"{0}"'.format(if_match)]:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
acl = s3_backend.get_object_acl(key)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(acl=acl)
if "tagging" in query:
tags = self.backend.get_object_tagging(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
if "legal-hold" in query:
legal_hold = self.backend.get_object_legal_hold(key)
template = self.response_template(S3_OBJECT_LEGAL_HOLD)
return 200, response_headers, template.render(legal_hold=legal_hold)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
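                # e.g. "x-amz-copy-source-range: bytes=0-499" copies bytes
                # 0..499 (inclusive) of the source object into this part.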
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.upload_part(
bucket_name, upload_id, part_number, body
)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
bucket = self.backend.get_bucket(bucket_name)
lock_enabled = bucket.object_lock_enabled
lock_mode = request.headers.get("x-amz-object-lock-mode", None)
lock_until = request.headers.get("x-amz-object-lock-retain-until-date", None)
legal_hold = request.headers.get("x-amz-object-lock-legal-hold", "OFF")
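        # Explicit lock headers need Content-MD5 and a lock-enabled bucket;
        # otherwise a lock-enabled bucket with a default retention rule
        # applies that rule (which likewise requires Content-MD5).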
if lock_mode or lock_until or legal_hold == "ON":
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
if not lock_enabled:
raise LockNotEnabled
elif lock_enabled and bucket.has_default_lock:
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
lock_until = bucket.default_retention()
lock_mode = bucket.default_lock_mode
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "retention" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
retention = self._mode_until_from_xml(body)
self.backend.put_object_retention(
bucket_name, key_name, version_id=version_id, retention=retention
)
return 200, response_headers, ""
if "legal-hold" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
legal_hold_status = self._legal_hold_status_from_xml(body)
self.backend.put_object_legal_hold(
bucket_name, key_name, version_id, legal_hold_status
)
return 200, response_headers, ""
if "acl" in query:
self.backend.put_object_acl(bucket_name, key_name, acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
            # Copy key.
            # The copy source may be URL-quoted and may carry a
            # "?versionId=..." suffix, so parse the unquoted string first.
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, bytes):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_object(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
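        # Plain PUT: boto may stream the body in several chunks; the first
        # chunk creates the key and subsequent chunks are appended to it.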
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.put_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
lock_mode=lock_mode,
lock_legal_status=legal_hold,
lock_until=lock_until,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "HEAD", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.head_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _lock_config_from_xml(self, xml):
response_dict = {"enabled": False, "mode": None, "days": None, "years": None}
parsed_xml = xmltodict.parse(xml)
enabled = (
parsed_xml["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
)
response_dict["enabled"] = enabled
default_retention = parsed_xml.get("ObjectLockConfiguration").get("Rule")
if default_retention:
default_retention = default_retention.get("DefaultRetention")
mode = default_retention["Mode"]
days = int(default_retention.get("Days", 0))
years = int(default_retention.get("Years", 0))
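            # A default retention rule may specify Days or Years, never both.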
if days and years:
raise MalformedXML
response_dict["mode"] = mode
response_dict["days"] = days
response_dict["years"] = years
return response_dict
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
# The owner is needed for some reason...
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
def _acl_from_headers(self, headers):
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
header = header.lower()
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="?([^"]+)"?', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
def _tagging_from_xml(self, xml):
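        # force_list makes a single <Tag> element parse as a one-item list.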
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _mode_until_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return (
parsed_xml["Retention"]["Mode"],
parsed_xml["Retention"]["RetainUntilDate"],
)
def _legal_hold_status_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return parsed_xml["LegalHold"]["Status"]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = (
            0  # Tripwire -- if this is never set, there were no notifications
)
for name, arn_string in notification_fields:
            # First, verify that a proper notification configuration was passed
            # in, with an ARN that is at least plausibly correct -- the ARN
            # check here is deliberately simple:
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
                # Second, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, headers, bucket_name, query, key_name):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.abort_multipart_upload(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
bypass = headers.get("X-Amz-Bypass-Governance-Retention")
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id, bypass=bypass
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName("Part")
prev = 0
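        # CompleteMultipartUpload must list parts in ascending PartNumber
        # order; anything out of order is rejected as InvalidPartOrder.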
for p in ps:
pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
def _key_response_post(self, request, body, bucket_name, query, key_name):
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
storage_type = request.headers.get("x-amz-storage-class", "STANDARD")
multipart_id = self.backend.create_multipart_upload(
bucket_name, key_name, metadata, storage_type
)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart_id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
multipart, value, etag = self.backend.complete_multipart_upload(
bucket_name, multipart_id, body
)
if value is None:
return 400, {}, ""
key = self.backend.put_object(
bucket_name,
multipart.key_name,
value,
storage=multipart.storage,
etag=etag,
multipart=multipart,
)
key.set_metadata(multipart.metadata)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
    def _invalid_headers(self, url, headers):
        """
        Verify whether the metadata provided in the URL is also present in the headers
        :param url: .../file.txt&content-type=app%2Fjson&Signature=..
        :param headers: Content-Type=app/json
        :return: True or False
        """
        metadata_to_check = {
            "content-disposition": "Content-Disposition",
            "content-encoding": "Content-Encoding",
            "content-language": "Content-Language",
            "content-length": "Content-Length",
            "content-md5": "Content-MD5",
            "content-type": "Content-Type",
        }
        for url_key, header_key in metadata_to_check.items():
            metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
            if metadata_in_url:
                url_value = unquote(metadata_in_url.group(1))
                if header_key not in headers or (url_value != headers[header_key]):
                    return True
        return False
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
{% if common_prefixes %}
{% for prefix in common_prefixes %}
<CommonPrefixes>
<Prefix>{{ prefix }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
<Delimiter>{{ delimiter }}</Delimiter>
<KeyMarker>{{ key_marker or "" }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{{ 'true' if key.is_latest else 'false' }}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{{ 'true' if marker.is_latest else 'false' }}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_LEGAL_HOLD = """<?xml version="1.0" encoding="UTF-8"?>
<LegalHold>
<Status>{{ legal_hold }}</Status>
</LegalHold>
"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>{{ part_number_marker }}</PartNumberMarker>
<NextPartNumberMarker>{{ next_part_number_marker }}</NextPartNumberMarker>
<MaxParts>{{ max_parts }}</MaxParts>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<ServerSideEncryptionConfiguration xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
<BucketKeyEnabled>{{ 'true' if entry["Rule"].get("BucketKeyEnabled") == 'true' else 'false' }}</BucketKeyEnabled>
</Rule>
{% endfor %}
</ServerSideEncryptionConfiguration>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
S3_BUCKET_LOCK_CONFIGURATION = """
<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{%if lock_enabled %}
<ObjectLockEnabled>Enabled</ObjectLockEnabled>
{% else %}
<ObjectLockEnabled>Disabled</ObjectLockEnabled>
{% endif %}
{% if mode %}
<Rule>
<DefaultRetention>
<Mode>{{mode}}</Mode>
<Days>{{days}}</Days>
<Years>{{years}}</Years>
</DefaultRetention>
</Rule>
{% endif %}
</ObjectLockConfiguration>
"""
S3_DUPLICATE_BUCKET_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>BucketAlreadyOwnedByYou</Code>
<Message>Your previous request to create the named bucket succeeded and you already own it.</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
""" | def _invalid_headers(self, url, headers):
"""
Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False
"""
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False | 1,994 | 2,015 | from __future__ import unicode_literals
import io
import os
import re
import sys
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
amzn_request_id,
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
)
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urlencode,
urlunparse,
)
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.settings import S3_IGNORE_SUBDOMAIN_BUCKETNAME
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
BucketMustHaveLockeEnabled,
DuplicateTagKeys,
InvalidContentMD5,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidMaxPartArgument,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
LockNotEnabled,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
ACTION_MAP = {
"BUCKET": {
"HEAD": {"DEFAULT": "HeadBucket",},
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"website": "DeleteBucketWebsite",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"HEAD": {"DEFAULT": "HeadObject",},
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
},
"DELETE": {
"uploadId": "AbortMultipartUpload",
"versionId": "DeleteObjectVersion",
"DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
def parse_key_name(pth):
# strip the first '/' left by urlparse
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
# GOlang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty)
# Python sends a request as url/?delete (treating it as a flag)
# https://github.com/spulec/moto/issues/2937
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.list_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
if S3_IGNORE_SUBDOMAIN_BUCKETNAME:
return False
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or
# (4) kubernetes host names
return False
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# whether this is a bucket or a key request, so we have to check
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
@amzn_request_id
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
if isinstance(response, str):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, bytes):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, bytes):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name, querystring)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
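# Note: keep_blank_values=True keeps flag-style parameters around as keys
# with empty values, e.g. parse_qs("versioning", keep_blank_values=True)
# == {"versioning": [""]}, which is what lets membership checks like
# "versioning" in querystring work elsewhere in this class.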
def _bucket_response_head(self, bucket_name, querystring):
self._set_action("BUCKET", "HEAD", querystring)
self._authenticate_and_authorize_s3_action()
try:
self.backend.head_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
(
lock_enabled,
mode,
days,
years,
) = self.backend.get_object_lock_configuration(bucket_name)
template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)
return template.render(
lock_enabled=lock_enabled, mode=mode, days=days, years=years,
)
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
location = self.backend.get_bucket_location(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
rules = self.backend.get_bucket_lifecycle(bucket_name)
if not rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
acl = self.backend.get_bucket_acl(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(acl=acl)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_public_access_block(bucket_name)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
(
versions,
common_prefixes,
delete_markers,
) = self.backend.list_object_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
key_list = versions
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return (
200,
{},
template.render(
common_prefixes=common_prefixes,
key_list=key_list,
delete_marker_list=delete_markers,
bucket=bucket,
prefix=prefix,
max_keys=1000,
delimiter=delimiter,
key_marker=key_marker,
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.list_objects(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
def _set_action(self, action_resource_type, method, querystring):
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
all_keys = self.backend.list_objects_v2(bucket, prefix, delimiter)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
@staticmethod
def _split_truncated_keys(truncated_keys):
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
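# Assumes result_keys is sorted by name, mirroring S3's lexicographic
# listing order: everything up to and including the token is skipped, so a
# token of "b" over keys ["a", "b", "c"] yields ["c"].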
def _truncate_result(self, result_keys, max_keys):
if max_keys == 0:
result_keys = []
is_truncated = "true"  # a string, so the XML template renders lowercase "true"
next_continuation_token = None
elif len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
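# Pagination contract in brief: with max_keys=2 over three keys, the caller
# gets two keys, is_truncated == "true", and the last returned key's name as
# the continuation token; the next request resumes after that token.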
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
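# e.g. a body of '<CreateBucketConfiguration xmlns="..."/>' parses to a
# dict holding only the "@xmlns" attribute, which this treats as empty.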
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
body_decoded = body.decode()
config = self._lock_config_from_xml(body_decoded)
if not self.backend.get_bucket(bucket_name).object_lock_enabled:
raise BucketMustHaveLockeEnabled
self.backend.put_object_lock_configuration(
bucket_name,
config.get("enabled"),
config.get("mode"),
config.get("days"),
config.get("years"),
)
return 200, {}, ""
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
# If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.put_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.put_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.put_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
new_bucket = self.backend.get_bucket(bucket_name)
if (
new_bucket.region_name == DEFAULT_REGION_NAME
and region_name == DEFAULT_REGION_NAME
):
# us-east-1 has different behavior - creating a bucket there is an idempotent operation
pass
else:
template = self.response_template(S3_DUPLICATE_BUCKET_ERROR)
return 409, {}, template.render(bucket_name=bucket_name)
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.put_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
if (
request.headers.get("x-amz-bucket-object-lock-enabled", "").lower()
== "true"
):
new_bucket.object_lock_enabled = True
new_bucket.versioning_status = "Enabled"
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "website" in querystring:
self.backend.delete_bucket_website(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
self.backend.delete_bucket_lifecycle(bucket_name)
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
fobj = request.files["file"]
f = fobj.stream.read()
key = key.replace("${filename}", os.path.basename(fobj.filename))
if "success_action_redirect" in form:
redirect = form["success_action_redirect"]
parts = urlparse(redirect)
queryargs = parse_qs(parts.query)
queryargs["key"] = key
queryargs["bucket"] = bucket_name
redirect_queryargs = urlencode(queryargs, doseq=True)
newparts = (
parts.scheme,
parts.netloc,
parts.path,
parts.params,
redirect_queryargs,
parts.fragment,
)
fixed_redirect = urlunparse(newparts)
response_headers["Location"] = fixed_redirect
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.put_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body, strip_whitespace=False)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = self.backend.delete_objects(bucket_name, objects)
error_names = []
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
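# Worked example of the range math above: for a 10-byte body, "bytes=0-4"
# yields bytes 0..4 with Content-Range "bytes 0-4/10", while the suffix
# form "bytes=-3" yields the last three bytes (begin=7, end=9).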
def _handle_v4_chunk_signatures(self, body, content_length):
body_io = io.BytesIO(body)
new_body = bytearray(content_length)
pos = 0
line = body_io.readline()
while line:
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
# str(hex(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
chunk_size = int(line[: line.find(b";")].decode("utf8"), 16)
new_body[pos : pos + chunk_size] = body_io.read(chunk_size)
pos = pos + chunk_size
body_io.read(2) # skip trailing \r\n
line = body_io.readline()
return bytes(new_body)
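# Sketch of the framing being stripped (signatures are ignored), for a
# decoded content length of 5:
#   b"5;chunk-signature=<sig>\r\nhello\r\n0;chunk-signature=<sig>\r\n\r\n"
# decodes to b"hello".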
@amzn_request_id
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, self.headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, str):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[-1]  # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[-1]  # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[-1]  # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
# Because we patch the requests library, boto/boto3 API requests go
# through this method, but so do plain calls like
# `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`.
# Here we deny public access to private files by checking the ACL and
# checking for the mere presence of an Authorization header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if not body:
# when the data is being passed as a file
if request.files:
for _, value in request.files.items():
body = value.stream.read()
elif hasattr(request, "form"):
# Body comes through as part of the form, if no content-type is set on the PUT-request
# form = ImmutableMultiDict([('some data 123 321', '')])
form = request.form
for k, _ in form.items():
body = k
if body is None:
body = b""
if (
request.headers.get("x-amz-content-sha256", None)
== "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
):
body = self._handle_v4_chunk_signatures(
body, int(request.headers["x-amz-decoded-content-length"])
)
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(headers, bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
# 0 <= PartNumberMarker <= 2,147,483,647
part_number_marker = int(query.get("part-number-marker", [0])[0])
if not (0 <= part_number_marker <= 2147483647):
raise InvalidMaxPartArgument("part-number-marker", 0, 2147483647)
# 0 <= MaxParts <= 2,147,483,647 (default is 1,000)
max_parts = int(query.get("max-parts", [1000])[0])
if not (0 <= max_parts <= 2147483647):
raise InvalidMaxPartArgument("max-parts", 0, 2147483647)
parts = self.backend.list_parts(
bucket_name,
upload_id,
part_number_marker=part_number_marker,
max_parts=max_parts,
)
next_part_number_marker = parts[-1].name + 1 if parts else 0
is_truncated = parts and self.backend.is_truncated(
bucket_name, upload_id, next_part_number_marker
)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
is_truncated=str(is_truncated).lower(),
max_parts=max_parts,
next_part_number_marker=next_part_number_marker,
parts=parts,
part_number_marker=part_number_marker,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion()
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag not in [if_match, '"{0}"'.format(if_match)]:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
acl = s3_backend.get_object_acl(key)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(acl=acl)
if "tagging" in query:
tags = self.backend.get_object_tagging(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
if "legal-hold" in query:
legal_hold = self.backend.get_object_legal_hold(key)
template = self.response_template(S3_OBJECT_LEGAL_HOLD)
return 200, response_headers, template.render(legal_hold=legal_hold)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.upload_part(
bucket_name, upload_id, part_number, body
)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
bucket = self.backend.get_bucket(bucket_name)
lock_enabled = bucket.object_lock_enabled
lock_mode = request.headers.get("x-amz-object-lock-mode", None)
lock_until = request.headers.get("x-amz-object-lock-retain-until-date", None)
legal_hold = request.headers.get("x-amz-object-lock-legal-hold", "OFF")
if lock_mode or lock_until or legal_hold == "ON":
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
if not lock_enabled:
raise LockNotEnabled
elif lock_enabled and bucket.has_default_lock:
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
lock_until = bucket.default_retention()
lock_mode = bucket.default_lock_mode
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "retention" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
retention = self._mode_until_from_xml(body)
self.backend.put_object_retention(
bucket_name, key_name, version_id=version_id, retention=retention
)
return 200, response_headers, ""
if "legal-hold" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
legal_hold_status = self._legal_hold_status_from_xml(body)
self.backend.put_object_legal_hold(
bucket_name, key_name, version_id, legal_hold_status
)
return 200, response_headers, ""
if "acl" in query:
self.backend.put_object_acl(bucket_name, key_name, acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
# Copy key
# the copy source may carry a quoted "?versionId=<id>" suffix, so
# unquote and parse the URL before splitting out bucket and key
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, bytes):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_object(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.put_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
lock_mode=lock_mode,
lock_legal_status=legal_hold,
lock_until=lock_until,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "HEAD", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.head_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _lock_config_from_xml(self, xml):
response_dict = {"enabled": False, "mode": None, "days": None, "years": None}
parsed_xml = xmltodict.parse(xml)
enabled = (
parsed_xml["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
)
response_dict["enabled"] = enabled
default_retention = parsed_xml.get("ObjectLockConfiguration").get("Rule")
if default_retention:
default_retention = default_retention.get("DefaultRetention")
mode = default_retention["Mode"]
days = int(default_retention.get("Days", 0))
years = int(default_retention.get("Years", 0))
if days and years:
raise MalformedXML
response_dict["mode"] = mode
response_dict["days"] = days
response_dict["years"] = years
return response_dict
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
# The owner is needed for some reason...
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
def _acl_from_headers(self, headers):
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
header = header.lower()
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="?([^"]+)"?', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
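# e.g. a header of x-amz-tagging: k1=v1&k2=v2 (hypothetical keys/values)
# parses to {"k1": "v1", "k2": "v2"}.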
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return list(parsed_xml["CORSConfiguration"]["CORSRule"])
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _mode_until_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return (
parsed_xml["Retention"]["Mode"],
parsed_xml["Retention"]["RetainUntilDate"],
)
def _legal_hold_status_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return parsed_xml["LegalHold"]["Status"]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = 0  # Tripwire -- if this never increments, there were no notifications
for name, arn_string in notification_fields:
# 1st verify that the proper notification configuration has been passed in (with an ARN that is close
# to being correct -- nothing too complex in the ARN logic):
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
# 2nd, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, headers, bucket_name, query, key_name):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.abort_multipart_upload(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
bypass = headers.get("X-Amz-Bypass-Governance-Retention")
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id, bypass=bypass
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName("Part")
prev = 0
for p in ps:
pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
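# e.g. a body of
#   <CompleteMultipartUpload>
#     <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
#   </CompleteMultipartUpload>
# yields (1, '"etag-1"'); a part number <= the previous one raises
# InvalidPartOrder.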
def _key_response_post(self, request, body, bucket_name, query, key_name):
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
storage_type = request.headers.get("x-amz-storage-class", "STANDARD")
multipart_id = self.backend.create_multipart_upload(
bucket_name, key_name, metadata, storage_type
)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart_id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
multipart, value, etag = self.backend.complete_multipart_upload(
bucket_name, multipart_id, body
)
if value is None:
return 400, {}, ""
key = self.backend.put_object(
bucket_name,
multipart.key_name,
value,
storage=multipart.storage,
etag=etag,
multipart=multipart,
)
key.set_metadata(multipart.metadata)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
def _invalid_headers(self, url, headers):
"""
Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False
"""
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
{% if common_prefixes %}
{% for prefix in common_prefixes %}
<CommonPrefixes>
<Prefix>{{ prefix }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
<Delimiter>{{ delimiter }}</Delimiter>
<KeyMarker>{{ key_marker or "" }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{{ 'true' if key.is_latest else 'false' }}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{{ 'true' if marker.is_latest else 'false' }}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_LEGAL_HOLD = """<?xml version="1.0" encoding="UTF-8"?>
<LegalHold>
<Status>{{ legal_hold }}</Status>
</LegalHold>
"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>{{ part_number_marker }}</PartNumberMarker>
<NextPartNumberMarker>{{ next_part_number_marker }}</NextPartNumberMarker>
<MaxParts>{{ max_parts }}</MaxParts>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<ServerSideEncryptionConfiguration xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
<BucketKeyEnabled>{{ 'true' if entry["Rule"].get("BucketKeyEnabled") == 'true' else 'false' }}</BucketKeyEnabled>
</Rule>
{% endfor %}
</ServerSideEncryptionConfiguration>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
S3_BUCKET_LOCK_CONFIGURATION = """
<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{%if lock_enabled %}
<ObjectLockEnabled>Enabled</ObjectLockEnabled>
{% else %}
<ObjectLockEnabled>Disabled</ObjectLockEnabled>
{% endif %}
{% if mode %}
<Rule>
<DefaultRetention>
<Mode>{{mode}}</Mode>
<Days>{{days}}</Days>
<Years>{{years}}</Years>
</DefaultRetention>
</Rule>
{% endif %}
</ObjectLockConfiguration>
"""
S3_DUPLICATE_BUCKET_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>BucketAlreadyOwnedByYou</Code>
<Message>Your previous request to create the named bucket succeeded and you already own it.</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
|
__init__ | Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
    X_pending: A `m x d`-dim Tensor of `m` design points that have
        been submitted for function evaluation but have not yet been
        evaluated. | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
# MASKED: __init__ function (lines 42-73)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
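# --- Illustrative sketch (not part of this module): a custom MC acquisition
# function only needs to subclass MCAcquisitionFunction and implement
# `forward`, reusing the `sampler` and `objective` modules registered in
# __init__. The class name `qPosteriorMean` is hypothetical.
class qPosteriorMean(MCAcquisitionFunction):
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        posterior = self.model.posterior(X)  # joint posterior over the q-batch
        samples = self.sampler(posterior)    # reparameterized MC samples
        obj = self.objective(samples)        # `sample_shape x (b) x q` objective values
        return obj.mean(dim=-1).mean(dim=0)  # average over q, then over samples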
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
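        # Improvement over best_f per sample, maximized over the q-batch and
        # averaged over MC samples (steps 2-4 of the class docstring).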
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `r x d`-dim Tensor of `r` design points that have
already been observed. These points are considered as the
potential best design point.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
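        # The first q entries along the last dimension are the candidates and
        # the remaining entries are the baseline points, so `diffs` below is
        # the per-sample improvement of the q-batch over the observed best.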
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimates of a probability involve taking the expectation of an
indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
                approximations of the function, but result in gradient
                estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
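        # Smooth the indicator 1{max_obj > best_f} with a sigmoid of
        # temperature tau so the MC estimate stays differentiable.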
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
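        # For Y ~ N(mu, sigma^2), E|Y - mu| = sigma * sqrt(2 / pi); scaling by
        # sqrt(beta * pi / 2) therefore gives E[mu + beta_prime * |Y - mu|]
        # = mu + sqrt(beta) * sigma, matching the analytic UCB for q = 1.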
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0) | def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
        X_pending: A `m x d`-dim Tensor of `m` design points that have
            been submitted for function evaluation but have not yet
            been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending) | 42 | 73 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet
                been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `r x d`-dim Tensor of `r` design points that have
already been observed. These points are considered as the
potential best design point.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimates of a probability involve taking the expectation of an
indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
                approximations of the function, but result in gradient
                estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
|
__init__ | q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
    X_pending: A `m x d`-dim Tensor of `m` design points that have
        been submitted for function evaluation but have not yet been
        evaluated. Concatenated into X upon forward call. Copied and
        set to have no gradient. | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet
                been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
# MASKED: __init__ function (lines 104-131)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `r x d`-dim Tensor of `r` design points that have
already been observed. These points are considered as the
potential best design point.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimates of a probability involve taking the expectation of an
indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
                approximations of the function, but result in gradient
                estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0) | def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f) | 104 | 131 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
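# Editor's note: the helper below is an illustrative sketch, not part of the
# original module. It spells out the shared Monte-Carlo reduction pattern that
# every concrete acquisition function in this file instantiates; the argument
# names are hypothetical and only assume the interfaces imported above.
def _mc_reduction_sketch(model, sampler, objective, X):
    # 1) joint posterior over the q-batch (plus any concatenated pending points)
    posterior = model.posterior(X)
    # 2) reparameterized draws: `sample_shape x (b) x q x o`
    samples = sampler(posterior)
    # 3) scalarize the (possibly multi-output) samples: `sample_shape x (b) x q`
    obj = objective(samples)
    # 4) maximize over the q candidates, then average over the MC samples
    return obj.max(dim=-1)[0].mean(dim=0)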
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
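# A hedged, self-contained check of the qEI estimator above (toy numbers, not
# from the original source): i.i.d. standard-normal draws stand in for
# posterior samples, reproducing steps (1)-(4) from the class docstring.
def _qei_by_hand(n_samples: int = 8192, q: int = 2, best_f: float = 0.0) -> Tensor:
    samples = torch.randn(n_samples, 1, q)         # `sample_shape x b x q`
    improvement = (samples - best_f).clamp_min(0)  # per-point improvement
    return improvement.max(dim=-1)[0].mean(dim=0)  # max over q, mean over MC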
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: An `r x d`-dim Tensor of `r` design points that have
    already been observed. These points are considered potential
    best design points.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
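# Sketch (hypothetical helper, not in the original module): given scalarized
# samples for the concatenated set `(X, X_baseline)`, the first `q` entries of
# the last dimension are the candidates and the remainder the baseline; qNEI
# is the averaged positive part of their per-sample best difference.
def _qnei_split_sketch(obj: Tensor, q: int) -> Tensor:
    new_best = obj[..., :q].max(dim=-1)[0]  # best candidate per MC sample
    old_best = obj[..., q:].max(dim=-1)[0]  # best baseline per MC sample
    return (new_best - old_best).clamp_min(0).mean(dim=0)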
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimation of a probability involves taking the expectation of
an indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient
estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
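# Sketch (assumption, not original code) of the relaxation used by qPI above:
# the hard indicator `max_obj >= best_f` has zero gradient almost everywhere,
# so it is replaced by a sigmoid of slope `1 / tau`. As `tau -> 0` the sigmoid
# approaches the step function (less bias); larger `tau` smooths it (lower
# gradient variance, more bias).
def _soft_indicator_sketch(max_obj: Tensor, best_f: float, tau: float) -> Tensor:
    hard = (max_obj >= best_f).float().mean(dim=0)  # non-differentiable target
    soft = torch.sigmoid((max_obj - best_f) / tau).mean(dim=0)
    return soft - hard  # relaxation bias; shrinks as tau -> 0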
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
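# Sanity sketch for `beta_prime` (toy numbers, not from the original source):
# for q = 1 and Y ~ N(mu, sigma^2), E|Y - mu| = sigma * sqrt(2 / pi), so with
# beta_prime = sqrt(beta * pi / 2) the qUCB value mu + beta_prime * E|Y - mu|
# reduces to the classic UCB rule mu + sqrt(beta) * sigma.
def _qucb_beta_prime_check(beta: float = 0.5, n: int = 200_000) -> Tensor:
    mu, sigma = 1.0, 2.0
    beta_prime = math.sqrt(beta * math.pi / 2)
    y = mu + sigma * torch.randn(n)
    mc = (mu + beta_prime * (y - mu).abs()).mean()
    return mc - (mu + math.sqrt(beta) * sigma)  # approximately zero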
|
forward | Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
# MASKED: forward function (lines 133-151)
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: An `r x d`-dim Tensor of `r` design points that have
    already been observed. These points are considered potential
    best design points.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
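# Usage sketch for `prune_baseline` (hedged; it mirrors the docstring above):
# to customize pruning instead of using the flag, call the utility directly,
# e.g.
#
#     X_baseline = prune_inferior_points(model=model, X=X_baseline, objective=objective)
#     qnei = qNoisyExpectedImprovement(model, X_baseline, prune_baseline=False)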
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimation of a probability involves taking the expectation of
an indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient
estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
|
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
| 133 | 151 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: An `r x d`-dim Tensor of `r` design points that have
    already been observed. These points are considered potential
    best design points.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimation of a probability involves taking the expectation of
an indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient
estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
|
__init__ | q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: An `r x d`-dim Tensor of `r` design points that have
    already been observed. These points are considered potential
    best design points.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
# MASKED: __init__ function (lines 172-210)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
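# Note (editorial, hedged): `match_batch_shape(self.X_baseline, X)` expands the
# `r x d` baseline to X's `(b) x r x d` batch shape so the `dim=-2` concat is
# well-defined; the posterior above is then joint over all `q + r` points in
# each t-batch, which is what lets improvement be measured per MC sample.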
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimation of a probability involves taking the expectation of
an indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient
estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
|
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: An `r x d`-dim Tensor of `r` design points that have
    already been observed. These points are considered potential
    best design points.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline) | 172 | 210 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: An `m x d`-dim Tensor of `m` design points that have
    been submitted for function evaluation
but have not yet been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
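# Illustrative sketch (not part of the BoTorch API): a minimal concrete
# subclass showing the canonical forward pattern -- draw joint posterior
# samples via the sampler, map them through the objective, reduce over the
# q-dimension, then average over the MC samples. The class name is
# hypothetical.
class _qMeanSketch(MCAcquisitionFunction):
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        posterior = self.model.posterior(X)  # joint posterior over the q points
        samples = self.sampler(posterior)  # sample_shape x (b) x q x o
        obj = self.objective(samples)  # sample_shape x (b) x q
        return obj.mean(dim=-1).mean(dim=0)  # reduce q, then MC-average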
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
        >>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)  # sample_shape x (b) x q x o
        obj = self.objective(samples)  # sample_shape x (b) x q
        obj = (obj - self.best_f).clamp_min(0)  # per-sample improvement
        q_ei = obj.max(dim=-1)[0].mean(dim=0)  # max over q, then MC-average
return q_ei
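# Hedged usage sketch for qExpectedImprovement (placeholder tensors; assumes
# `botorch.models.SingleTaskGP` and standardized training targets):
def _example_qei_usage(train_X: Tensor, train_Y: Tensor, test_X: Tensor) -> Tensor:
    from botorch.models import SingleTaskGP

    model = SingleTaskGP(train_X, train_Y)
    best_f = train_Y.max()  # incumbent best observed value (assumed noiseless)
    sampler = SobolQMCNormalSampler(num_samples=512)
    acqf = qExpectedImprovement(model, best_f=best_f, sampler=sampler)
    # test_X must be `(b) x q x d`; the result is a `(b)`-dim Tensor
    return acqf(test_X)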
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
            X_baseline: A `r x d`-dim Tensor of `r` design points that have
                already been observed. The best objective value over these
                points serves as the (noisy) incumbent that improvement is
                measured against.
sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
                improve performance and is generally recommended. To customize
                the pruning parameters, call
                `botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
                manually before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
        posterior = self.model.posterior(X_full)
        samples = self.sampler(posterior)  # sample_shape x (b) x (q + r) x o
        obj = self.objective(samples)  # sample_shape x (b) x (q + r)
        # improvement of the best candidate over the best baseline, per sample
        diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
        return diffs.clamp_min(0).mean(dim=0)
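        # Worked shape example for the slicing above (illustrative numbers):
        # with q = 2 candidates, r = 3 baseline points, and 512 MC samples,
        # `obj` has shape `512 x (b) x 5`; `obj[:, :, :2]` covers the
        # candidates and `obj[:, :, 2:]` the baseline.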
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
    MC-based estimates of a probability involve taking the expectation of an
    indicator function; to support auto-differentiation, the indicator is
    replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
        >>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
                of the step function. Smaller values yield more accurate
                approximations of the indicator, but result in gradient
                estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)  # sample_shape x (b) x q x o
        obj = self.objective(samples)  # sample_shape x (b) x q
        max_obj = obj.max(dim=-1)[0]  # best value within each q-batch
        val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
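# Illustrative sketch (hypothetical helper, not part of the class): as `tau`
# shrinks, the sigmoid approaches the exact indicator `max Y >= best_f`, at
# the cost of noisier gradient estimates.
def _sigmoid_relaxation_demo() -> None:
    z = torch.tensor([-0.1, 0.0, 0.1])  # example values of (max_obj - best_f)
    for tau in (1.0, 1e-1, 1e-3):
        # each row approaches {0.0, 0.5, 1.0} as tau -> 0
        print(tau, torch.sigmoid(z / tau).tolist())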
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)  # sample_shape x (b) x q x o
        obj = self.objective(samples)  # sample_shape x (b) x q
        val = obj.max(dim=-1)[0].mean(dim=0)  # max over q, then MC-average
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
    Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
    of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
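        # Rationale (sketch of the derivation): for Y ~ N(mu, sigma^2),
        # E|Y - mu| = sigma * sqrt(2 / pi), so beta_prime = sqrt(beta * pi / 2)
        # makes the expectation of `mean + beta_prime * |obj - mean|` recover
        # the analytic UCB value `mu + sqrt(beta) * sigma` when q = 1.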
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)  # sample_shape x (b) x q x o
        obj = self.objective(samples)  # sample_shape x (b) x q
        mean = obj.mean(dim=0)  # MC estimate of the posterior mean
        ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
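# Illustrative numeric sanity check (hypothetical helper): for q = 1 and an
# identity objective, the MC estimate above should converge to the analytic
# UCB value `mu + sqrt(beta) * sigma` as the number of samples grows.
def _qucb_sanity_check(beta: float = 2.0, n: int = 100_000) -> None:
    mu, sigma = 0.5, 1.3  # arbitrary example posterior moments
    y = mu + sigma * torch.randn(n)
    beta_prime = math.sqrt(beta * math.pi / 2)
    mc_ucb = (mu + beta_prime * (y - mu).abs()).mean()
    print(float(mc_ucb), mu + math.sqrt(beta) * sigma)  # should be close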
|